Merge
author dcubed
Wed, 10 Sep 2014 17:06:36 -0700
changeset 26685 aa239a0dfbea
parent 26682 f339669ba825 (current diff)
parent 26684 d1221849ea3d (diff)
child 26686 d7bc560b0ee9
Merge
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -106,18 +106,18 @@
 # include <sys/syscall.h>
 
 #if defined(__FreeBSD__) || defined(__NetBSD__)
-# include <elf.h>
+  #include <elf.h>
 #endif
 
 #ifdef __APPLE__
-# include <mach/mach.h> // semaphore_* API
-# include <mach-o/dyld.h>
-# include <sys/proc_info.h>
-# include <objc/objc-auto.h>
+  #include <mach/mach.h> // semaphore_* API
+  #include <mach-o/dyld.h>
+  #include <sys/proc_info.h>
+  #include <objc/objc-auto.h>
 #endif
 
 #ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
+  #define MAP_ANONYMOUS MAP_ANON
 #endif
 
 #define MAX_PATH    (2 * K)
@@ -152,9 +152,9 @@
 
 static pid_t _initial_pid = 0;
 
-/* Signal number used to suspend/resume a thread */
-
-/* do not use any signal number less than SIGSEGV, see 4355769 */
+// Signal number used to suspend/resume a thread
+
+// do not use any signal number less than SIGSEGV, see 4355769
 static int SR_signum = SIGUSR2;
 sigset_t SR_sigset;
 
@@ -232,20 +232,20 @@
 #elif defined(PPC32)
 static char cpu_arch[] = "ppc";
 #elif defined(SPARC)
-#  ifdef _LP64
+  #ifdef _LP64
 static char cpu_arch[] = "sparcv9";
-#  else
+  #else
 static char cpu_arch[] = "sparc";
-#  endif
+  #endif
 #else
-#error Add appropriate cpu_arch setting
+  #error Add appropriate cpu_arch setting
 #endif
 
 // Compiler variant
 #ifdef COMPILER2
-#define COMPILER_VARIANT "server"
+  #define COMPILER_VARIANT "server"
 #else
-#define COMPILER_VARIANT "client"
+  #define COMPILER_VARIANT "client"
 #endif
 
 
@@ -255,21 +255,19 @@
   int cpu_val;
   julong mem_val;
 
-  /* get processors count via hw.ncpus sysctl */
+  // get processors count via hw.ncpus sysctl
   mib[0] = CTL_HW;
   mib[1] = HW_NCPU;
   len = sizeof(cpu_val);
   if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
-       assert(len == sizeof(cpu_val), "unexpected data size");
-       set_processor_count(cpu_val);
+    assert(len == sizeof(cpu_val), "unexpected data size");
+    set_processor_count(cpu_val);
+  } else {
+    set_processor_count(1);   // fallback
   }
-  else {
-       set_processor_count(1);   // fallback
-  }
-
-  /* get physical memory via hw.memsize sysctl (hw.memsize is used
-   * since it returns a 64 bit value)
-   */
+
+  // get physical memory via hw.memsize sysctl (hw.memsize is used
+  // since it returns a 64 bit value)
   mib[0] = CTL_HW;
 
 #if defined (HW_MEMSIZE) // Apple
@@ -284,19 +282,19 @@
 
   len = sizeof(mem_val);
   if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
-       assert(len == sizeof(mem_val), "unexpected data size");
-       _physical_memory = mem_val;
+    assert(len == sizeof(mem_val), "unexpected data size");
+    _physical_memory = mem_val;
   } else {
-       _physical_memory = 256*1024*1024;       // fallback (XXXBSD?)
+    _physical_memory = 256 * 1024 * 1024;       // fallback (XXXBSD?)
   }
 
 #ifdef __OpenBSD__
   {
-       // limit _physical_memory memory view on OpenBSD since
-       // datasize rlimit restricts us anyway.
-       struct rlimit limits;
-       getrlimit(RLIMIT_DATA, &limits);
-       _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
+    // limit _physical_memory memory view on OpenBSD since
+    // datasize rlimit restricts us anyway.
+    struct rlimit limits;
+    getrlimit(RLIMIT_DATA, &limits);
+    _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
   }
 #endif
 }
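For reference, a minimal standalone sketch of the sysctl(3) pattern used in the hunk above to query the processor count and physical memory on BSD/OS X (illustrative only, not part of this changeset; the fallback values mirror the ones shown):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int mib[2] = { CTL_HW, HW_NCPU };
      int ncpu = 0;
      size_t len = sizeof(ncpu);
      if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1 || ncpu < 1) {
        ncpu = 1;  // fallback, as in the hunk above
      }

      uint64_t mem = 0;
    #if defined(HW_MEMSIZE)
      mib[1] = HW_MEMSIZE;   // Apple: always a 64-bit value
    #else
      mib[1] = HW_PHYSMEM;   // other BSDs; may report a 32-bit value
    #endif
      len = sizeof(mem);
      if (sysctl(mib, 2, &mem, &len, NULL, 0) == -1) {
        mem = 256ULL * 1024 * 1024;  // fallback, as above
      }
      printf("cpus=%d physical memory=%llu bytes\n", ncpu, (unsigned long long)mem);
      return 0;
    }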
@@ -342,14 +340,14 @@
   // Important note: if the location of libjvm.so changes this
   // code needs to be changed accordingly.
 
-// See ld(1):
-//      The linker uses the following search paths to locate required
-//      shared libraries:
-//        1: ...
-//        ...
-//        7: The default directories, normally /lib and /usr/lib.
+  // See ld(1):
+  //      The linker uses the following search paths to locate required
+  //      shared libraries:
+  //        1: ...
+  //        ...
+  //        7: The default directories, normally /lib and /usr/lib.
 #ifndef DEFAULT_LIBPATH
-#define DEFAULT_LIBPATH "/lib:/usr/lib"
+  #define DEFAULT_LIBPATH "/lib:/usr/lib"
 #endif
 
 // Base path of extensions installed on the system.
@@ -435,8 +433,8 @@
 
 #else // __APPLE__
 
-#define SYS_EXTENSIONS_DIR   "/Library/Java/Extensions"
-#define SYS_EXTENSIONS_DIRS  SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
+  #define SYS_EXTENSIONS_DIR   "/Library/Java/Extensions"
+  #define SYS_EXTENSIONS_DIRS  SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
 
   const char *user_home_dir = get_home();
   // The null in SYS_EXTENSIONS_DIRS counts for the size of the colon after user_home_dir.
@@ -561,14 +559,15 @@
 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 
 bool os::Bsd::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
+    return true;
+  } else {
+    return false;
+  }
 }
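The reformatted predicate above reduces to a single sigaction(2) query; a standalone sketch (hypothetical helper, slightly simplified from the HotSpot version by keying off SA_SIGINFO):

    #include <signal.h>

    // Query a signal's current disposition without changing it: passing a
    // NULL 'act' argument makes sigaction() a pure read.
    static bool sig_is_ignored(int sig) {
      struct sigaction oact;
      if (sigaction(sig, NULL, &oact) != 0) {
        return false;  // treat query failure as "not ignored"
      }
      void* handler = (oact.sa_flags & SA_SIGINFO)
          ? (void*)oact.sa_sigaction
          : (void*)oact.sa_handler;
      return handler == (void*)SIG_IGN;
    }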
 
 void os::Bsd::signal_sets_init() {
@@ -596,23 +595,24 @@
   sigaddset(&unblocked_sigs, SR_signum);
 
   if (!ReduceSignalUsage) {
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
-  if (!ReduceSignalUsage)
+  if (!ReduceSignalUsage) {
     sigaddset(&vm_sigs, BREAK_SIGNAL);
+  }
   debug_only(signal_sets_initialized = true);
 
 }
@@ -671,8 +671,8 @@
 #ifdef __APPLE__
 // library handle for calling objc_registerThreadWithCollector()
 // without static linking to the libobjc library
-#define OBJC_LIB "/usr/lib/libobjc.dylib"
-#define OBJC_GCREGISTER "objc_registerThreadWithCollector"
+  #define OBJC_LIB "/usr/lib/libobjc.dylib"
+  #define OBJC_GCREGISTER "objc_registerThreadWithCollector"
 typedef void (*objc_registerThreadWithCollector_t)();
 extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction;
 objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
@@ -846,9 +846,9 @@
 
   // Aborted due to thread limit being reached
   if (state == ZOMBIE) {
-      thread->set_osthread(NULL);
-      delete osthread;
-      return false;
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
   }
 
   // The thread is returned suspended (in state INITIALIZED),
@@ -868,7 +868,7 @@
 
 bool os::create_attached_thread(JavaThread* thread) {
 #ifdef ASSERT
-    thread->verify_not_published();
+  thread->verify_not_published();
 #endif
 
   // Allocate the OSThread object
@@ -919,7 +919,7 @@
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
-   }
+  }
 
   delete osthread;
 }
@@ -997,9 +997,9 @@
 }
 
 #ifndef __APPLE__
-#ifndef CLOCK_MONOTONIC
-#define CLOCK_MONOTONIC (1)
-#endif
+  #ifndef CLOCK_MONOTONIC
+    #define CLOCK_MONOTONIC (1)
+  #endif
 #endif
 
 #ifdef __APPLE__
@@ -1023,27 +1023,27 @@
 #ifdef __APPLE__
 
 jlong os::javaTimeNanos() {
-    const uint64_t tm = mach_absolute_time();
-    const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
-    const uint64_t prev = Bsd::_max_abstime;
-    if (now <= prev) {
-      return prev;   // same or retrograde time;
-    }
-    const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
-    assert(obsv >= prev, "invariant");   // Monotonicity
-    // If the CAS succeeded then we're done and return "now".
-    // If the CAS failed and the observed value "obsv" is >= now then
-    // we should return "obsv".  If the CAS failed and now > obsv > prv then
-    // some other thread raced this thread and installed a new value, in which case
-    // we could either (a) retry the entire operation, (b) retry trying to install now
-    // or (c) just return obsv.  We use (c).   No loop is required although in some cases
-    // we might discard a higher "now" value in deference to a slightly lower but freshly
-    // installed obsv value.   That's entirely benign -- it admits no new orderings compared
-    // to (a) or (b) -- and greatly reduces coherence traffic.
-    // We might also condition (c) on the magnitude of the delta between obsv and now.
-    // Avoiding excessive CAS operations to hot RW locations is critical.
-    // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
-    return (prev == obsv) ? now : obsv;
+  const uint64_t tm = mach_absolute_time();
+  const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
+  const uint64_t prev = Bsd::_max_abstime;
+  if (now <= prev) {
+    return prev;   // same or retrograde time;
+  }
+  const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
+  assert(obsv >= prev, "invariant");   // Monotonicity
+  // If the CAS succeeded then we're done and return "now".
+  // If the CAS failed and the observed value "obsv" is >= now then
+  // we should return "obsv".  If the CAS failed and now > obsv > prv then
+  // some other thread raced this thread and installed a new value, in which case
+  // we could either (a) retry the entire operation, (b) retry trying to install now
+  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
+  // we might discard a higher "now" value in deference to a slightly lower but freshly
+  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
+  // to (a) or (b) -- and greatly reduces coherence traffic.
+  // We might also condition (c) on the magnitude of the delta between obsv and now.
+  // Avoiding excessive CAS operations to hot RW locations is critical.
+  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
+  return (prev == obsv) ? now : obsv;
 }
 
 #else // __APPLE__
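The single-CAS strategy (c) described in the comment block can be reproduced with std::atomic; a sketch, using a hypothetical raw_now() as a stand-in for mach_absolute_time():

    #include <atomic>
    #include <chrono>
    #include <cstdint>

    // Stand-in for mach_absolute_time(): a clock that may jump backwards.
    static uint64_t raw_now() {
      using namespace std::chrono;
      return duration_cast<nanoseconds>(
          system_clock::now().time_since_epoch()).count();
    }

    static std::atomic<uint64_t> max_abstime(0);

    // Never-decreasing timestamp: one CAS, no retry loop. If another thread
    // races us and installs its own value, we just return what we observed.
    static uint64_t monotonic_now() {
      const uint64_t now = raw_now();
      uint64_t prev = max_abstime.load();
      if (now <= prev) {
        return prev;  // same or retrograde time
      }
      if (max_abstime.compare_exchange_strong(prev, now)) {
        return now;   // CAS succeeded, 'now' is the new maximum
      }
      return prev;    // CAS failed: 'prev' now holds the competing value
    }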
@@ -1176,7 +1176,6 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
   if (errno == 0)  return 0;
 
   const char *s = ::strerror(errno);
@@ -1246,9 +1245,9 @@
 
 #define JNI_LIB_PREFIX "lib"
 #ifdef __APPLE__
-#define JNI_LIB_SUFFIX ".dylib"
+  #define JNI_LIB_SUFFIX ".dylib"
 #else
-#define JNI_LIB_SUFFIX ".so"
+  #define JNI_LIB_SUFFIX ".so"
 #endif
 
 const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; }
@@ -1269,9 +1268,9 @@
   }
   return temp_path;
 }
-#else /* __APPLE__ */
+#else // __APPLE__
 const char* os::get_temp_directory() { return "/tmp"; }
-#endif /* __APPLE__ */
+#endif // __APPLE__
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -1307,7 +1306,7 @@
         continue; // skip the empty path values
       }
       snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
-          pelements[i], fname);
+               pelements[i], fname);
       if (file_exists(buffer)) {
         retval = true;
         break;
@@ -1372,14 +1371,13 @@
     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                           buf, buflen, offset, dlinfo.dli_fname)) {
-         return true;
+        return true;
       }
     }
 
     // Handle non-dynamic manually:
     if (dlinfo.dli_fbase != NULL &&
-        Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
-                        dlinfo.dli_fbase)) {
+        Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
       if (!Decoder::demangle(localbuf, buf, buflen)) {
         jio_snprintf(buf, buflen, "%s", localbuf);
       }
@@ -1433,8 +1431,7 @@
   return NULL;
 }
 #else
-void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
-{
+void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
     // Successful loading
@@ -1465,7 +1462,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
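The elf_head read above feeds the architecture check that follows; as a standalone sketch, the same probe works on any ELF shared object, since e_ident, e_type, and e_machine sit at identical offsets in the ELF32 and ELF64 headers:

    #include <elf.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      int fd = ::open(argv[1], O_RDONLY);
      if (fd < 0) return 1;
      Elf32_Ehdr ehdr;  // leading fields match the Elf64_Ehdr layout
      bool ok = ::read(fd, &ehdr, sizeof(ehdr)) == (ssize_t)sizeof(ehdr);
      ::close(fd);
      if (!ok || memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
        printf("%s: not an ELF file\n", argv[1]);
        return 1;
      }
      printf("%s: e_machine = 0x%x\n", argv[1], ehdr.e_machine);
      return 0;
    }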
@@ -1482,27 +1479,27 @@
   } arch_t;
 
   #ifndef EM_486
-  #define EM_486          6               /* Intel 80486 */
+    #define EM_486          6               /* Intel 80486 */
   #endif
 
   #ifndef EM_MIPS_RS3_LE
-  #define EM_MIPS_RS3_LE  10              /* MIPS */
+    #define EM_MIPS_RS3_LE  10              /* MIPS */
   #endif
 
   #ifndef EM_PPC64
-  #define EM_PPC64        21              /* PowerPC64 */
+    #define EM_PPC64        21              /* PowerPC64 */
   #endif
 
   #ifndef EM_S390
-  #define EM_S390         22              /* IBM System/390 */
+    #define EM_S390         22              /* IBM System/390 */
   #endif
 
   #ifndef EM_IA_64
-  #define EM_IA_64        50              /* HP/Intel IA-64 */
+    #define EM_IA_64        50              /* HP/Intel IA-64 */
   #endif
 
   #ifndef EM_X86_64
-  #define EM_X86_64       62              /* AMD x86-64 */
+    #define EM_X86_64       62              /* AMD x86-64 */
   #endif
 
   static const arch_t arch_array[]={
@@ -1525,33 +1522,33 @@
   };
 
   #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
+  static  Elf32_Half running_arch_code=EM_386;
   #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
+  static  Elf32_Half running_arch_code=EM_X86_64;
   #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
+  static  Elf32_Half running_arch_code=EM_IA_64;
   #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
   #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
+  static  Elf32_Half running_arch_code=EM_SPARC;
   #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
+  static  Elf32_Half running_arch_code=EM_PPC64;
   #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
+  static  Elf32_Half running_arch_code=EM_PPC;
   #elif  (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
+  static  Elf32_Half running_arch_code=EM_ARM;
   #elif  (defined S390)
-    static  Elf32_Half running_arch_code=EM_S390;
+  static  Elf32_Half running_arch_code=EM_S390;
   #elif  (defined ALPHA)
-    static  Elf32_Half running_arch_code=EM_ALPHA;
+  static  Elf32_Half running_arch_code=EM_ALPHA;
   #elif  (defined MIPSEL)
-    static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
+  static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
   #elif  (defined PARISC)
-    static  Elf32_Half running_arch_code=EM_PARISC;
+  static  Elf32_Half running_arch_code=EM_PARISC;
   #elif  (defined MIPS)
-    static  Elf32_Half running_arch_code=EM_MIPS;
+  static  Elf32_Half running_arch_code=EM_MIPS;
   #elif  (defined M68K)
-    static  Elf32_Half running_arch_code=EM_68K;
+  static  Elf32_Half running_arch_code=EM_68K;
   #else
     #error Method os::dll_load requires that one of following is defined:\
          IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
@@ -1574,7 +1571,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -1596,19 +1593,19 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
   return NULL;
 }
-#endif /* !__APPLE__ */
+#endif // !__APPLE__
 
 void* os::get_default_process_handle() {
 #ifdef __APPLE__
@@ -1630,7 +1627,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -1797,15 +1794,16 @@
 
   char dli_fname[MAXPATHLEN];
   bool ret = dll_address_to_library_name(
-                CAST_FROM_FN_PTR(address, os::jvm_path),
-                dli_fname, sizeof(dli_fname), NULL);
+                                         CAST_FROM_FN_PTR(address, os::jvm_path),
+                                         dli_fname, sizeof(dli_fname), NULL);
   assert(ret, "cannot locate libjvm");
   char *rp = NULL;
   if (ret && dli_fname[0] != '\0') {
     rp = realpath(dli_fname, buf);
   }
-  if (rp == NULL)
+  if (rp == NULL) {
     return;
+  }
 
   if (Arguments::sun_java_launcher_is_altjvm()) {
     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
@@ -1834,8 +1832,9 @@
         assert(strstr(p, "/libjvm") == p, "invalid library name");
 
         rp = realpath(java_home_var, buf);
-        if (rp == NULL)
+        if (rp == NULL) {
           return;
+        }
 
         // determine if this is a legacy image or modules image
         // modules image doesn't have "jre" subdirectory
@@ -1867,8 +1866,9 @@
         } else {
           // Fall back to path of current library
           rp = realpath(dli_fname, buf);
-          if (rp == NULL)
+          if (rp == NULL) {
             return;
+          }
         }
       }
     }
@@ -1890,18 +1890,18 @@
 
 static volatile jint sigint_count = 0;
 
-static void
-UserHandler(int sig, void *siginfo, void *context) {
+static void UserHandler(int sig, void *siginfo, void *context) {
   // 4511530 - sem_post is serialized and handled by the manager thread. When
   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
   // don't want to flood the manager thread with sem_post requests.
-  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
-      return;
+  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) {
+    return;
+  }
 
   // Ctrl-C is pressed during error reporting, likely because the error
   // handler fails to abort. Let VM die immediately.
   if (sig == SIGINT && is_error_reported()) {
-     os::die();
+    os::die();
   }
 
   os::signal_notify(sig);
@@ -1935,10 +1935,8 @@
   ::raise(signal_number);
 }
 
-/*
- * The following code is moved from os.cpp for making this
- * code platform specific, which it is by its very nature.
- */
+// The following code is moved from os.cpp for making this
+// code platform specific, which it is by its very nature.
 
 // Will be modified when max signal is changed to be dynamic
 int os::sigexitnum_pd() {
@@ -1951,29 +1949,31 @@
 // Bsd(POSIX) specific hand shaking semaphore.
 #ifdef __APPLE__
 typedef semaphore_t os_semaphore_t;
-#define SEM_INIT(sem, value)    semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
-#define SEM_WAIT(sem)           semaphore_wait(sem)
-#define SEM_POST(sem)           semaphore_signal(sem)
-#define SEM_DESTROY(sem)        semaphore_destroy(mach_task_self(), sem)
+
+  #define SEM_INIT(sem, value)    semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
+  #define SEM_WAIT(sem)           semaphore_wait(sem)
+  #define SEM_POST(sem)           semaphore_signal(sem)
+  #define SEM_DESTROY(sem)        semaphore_destroy(mach_task_self(), sem)
 #else
 typedef sem_t os_semaphore_t;
-#define SEM_INIT(sem, value)    sem_init(&sem, 0, value)
-#define SEM_WAIT(sem)           sem_wait(&sem)
-#define SEM_POST(sem)           sem_post(&sem)
-#define SEM_DESTROY(sem)        sem_destroy(&sem)
+
+  #define SEM_INIT(sem, value)    sem_init(&sem, 0, value)
+  #define SEM_WAIT(sem)           sem_wait(&sem)
+  #define SEM_POST(sem)           sem_post(&sem)
+  #define SEM_DESTROY(sem)        sem_destroy(&sem)
 #endif
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    jlong currenttime() const;
-    os_semaphore_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  jlong currenttime() const;
+  os_semaphore_t _semaphore;
 };
 
 Semaphore::Semaphore() : _semaphore(0) {
@@ -1993,9 +1993,9 @@
 }
 
 jlong Semaphore::currenttime() const {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
 }
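The SEM_* macros earlier in this hunk series paper over the Mach/POSIX split; a standalone sketch of the same split as a hypothetical wrapper (initial value 0, error handling reduced to a bool):

    #ifdef __APPLE__
      #include <mach/mach.h>
      typedef semaphore_t os_sem_t;
      // Mach semaphores are created against the current task.
      static bool os_sem_init(os_sem_t* s) {
        return semaphore_create(mach_task_self(), s,
                                SYNC_POLICY_FIFO, 0) == KERN_SUCCESS;
      }
      static void os_sem_post(os_sem_t* s) { semaphore_signal(*s); }
      static void os_sem_wait(os_sem_t* s) { semaphore_wait(*s); }
    #else
      #include <semaphore.h>
      typedef sem_t os_sem_t;
      // POSIX unnamed semaphores; second argument 0 = not process-shared.
      static bool os_sem_init(os_sem_t* s) { return sem_init(s, 0, 0) == 0; }
      static void os_sem_post(os_sem_t* s) { sem_post(s); }
      static void os_sem_wait(os_sem_t* s) { sem_wait(s); }
    #endif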
 
 #ifdef __APPLE__
@@ -2099,12 +2099,10 @@
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
       if (threadIsSuspended) {
-        //
         // The semaphore has been incremented, but while we were waiting
         // another thread suspended us. We don't want to continue running
         // while suspended because that would surprise the thread that
         // suspended us.
-        //
         ::SEM_POST(sig_sem);
 
         thread->java_suspend_self();
@@ -2192,7 +2190,7 @@
   }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
-                                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   if (res != (uintptr_t) MAP_FAILED) {
     return true;
   }
@@ -2206,7 +2204,7 @@
 }
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
+                          bool exec) {
   // alignment_hint is ignored on this OS
   return pd_commit_memory(addr, size, exec);
 }
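pd_commit_memory() above works by remapping read/write pages over an existing reservation; a standalone reserve-then-commit sketch (sizes fixed and error handling simplified for illustration):

    #include <sys/mman.h>
    #include <cstdio>

    #ifndef MAP_ANONYMOUS
      #define MAP_ANONYMOUS MAP_ANON   // same fallback this file defines
    #endif

    int main() {
      const size_t reserved = 1 << 20;
      // Reserve address space only: no access permitted yet.
      char* base = (char*)::mmap(NULL, reserved, PROT_NONE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == (char*)MAP_FAILED) { perror("reserve"); return 1; }
      // Commit one page by mapping it read/write at a fixed address.
      if (::mmap(base, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
        perror("commit"); return 1;
      }
      base[0] = 1;                     // the committed page is now usable
      ::munmap(base, reserved);
      return 0;
    }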
@@ -2274,7 +2272,7 @@
   return ::mprotect(addr, size, PROT_NONE) == 0;
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
-                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res  != (uintptr_t) MAP_FAILED;
 #endif
 }
@@ -2335,7 +2333,7 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                         size_t alignment_hint) {
+                            size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
@@ -2405,32 +2403,31 @@
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
-                        (!FLAG_IS_DEFAULT(UseLargePages) ||
-                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
-                        );
+                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(LargePageSizeInBytes));
 
   // Create a large shared memory region to attach to based on size.
   // Currently, size is the total size of the heap
   int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
-     // Possible reasons for shmget failure:
-     // 1. shmmax is too small for Java heap.
-     //    > check shmmax value: cat /proc/sys/kernel/shmmax
-     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
-     // 2. not enough large page memory.
-     //    > check available large pages: cat /proc/meminfo
-     //    > increase amount of large pages:
-     //          echo new_value > /proc/sys/vm/nr_hugepages
-     //      Note 1: different Bsd may use different name for this property,
-     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
-     //      Note 2: it's possible there's enough physical memory available but
-     //            they are so fragmented after a long run that they can't
-     //            coalesce into large pages. Try to reserve large pages when
-     //            the system is still "fresh".
-     if (warn_on_failure) {
-       warning("Failed to reserve shared memory (errno = %d).", errno);
-     }
-     return NULL;
+    // Possible reasons for shmget failure:
+    // 1. shmmax is too small for Java heap.
+    //    > check shmmax value: cat /proc/sys/kernel/shmmax
+    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+    // 2. not enough large page memory.
+    //    > check available large pages: cat /proc/meminfo
+    //    > increase amount of large pages:
+    //          echo new_value > /proc/sys/vm/nr_hugepages
+    //      Note 1: different Bsd may use different name for this property,
+    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
+    //      Note 2: it's possible there's enough physical memory available but
+    //            they are so fragmented after a long run that they can't
+    //            coalesce into large pages. Try to reserve large pages when
+    //            the system is still "fresh".
+    if (warn_on_failure) {
+      warning("Failed to reserve shared memory (errno = %d).", errno);
+    }
+    return NULL;
   }
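A standalone sketch of the SysV shared-memory reservation being reindented here (IPC_PRIVATE key; the segment id is marked for removal as soon as the mapping exists, matching the shmctl call below):

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <cstdio>

    int main() {
      const size_t bytes = 4u * 1024 * 1024;
      int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_R | SHM_W);
      if (shmid == -1) { perror("shmget"); return 1; }
      void* addr = shmat(shmid, NULL, 0);
      // Remove the id now; the mapping stays alive until shmdt()/exit.
      shmctl(shmid, IPC_RMID, NULL);
      if (addr == (void*)-1) { perror("shmat"); return 1; }
      printf("mapped %zu bytes at %p\n", bytes, addr);
      shmdt(addr);
      return 0;
    }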
 
   // attach to the region
@@ -2444,10 +2441,10 @@
   shmctl(shmid, IPC_RMID, NULL);
 
   if ((intptr_t)addr == -1) {
-     if (warn_on_failure) {
-       warning("Failed to attach shared memory (errno = %d).", err);
-     }
-     return NULL;
+    if (warn_on_failure) {
+      warning("Failed to attach shared memory (errno = %d).", err);
+    }
+    return NULL;
   }
 
   // The memory is committed
@@ -2518,12 +2515,12 @@
   // if kernel honors the hint then we can return immediately.
   char * addr = anon_mmap(requested_addr, bytes, false);
   if (addr == requested_addr) {
-     return requested_addr;
+    return requested_addr;
   }
 
   if (addr != NULL) {
-     // mmap() is successful but it fails to reserve at the requested address
-     anon_munmap(addr, bytes);
+    // mmap() is successful but it fails to reserve at the requested address
+    anon_munmap(addr, bytes);
   }
 
   int i;
@@ -2585,8 +2582,7 @@
   req.tv_sec = 0;
   if (ms > 0) {
     req.tv_nsec = (ms % 1000) * 1000000;
-  }
-  else {
+  } else {
     req.tv_nsec = 1;
   }
 
@@ -2649,7 +2645,7 @@
   31               // 11 CriticalPriority
 };
 #else
-/* Using Mach high-level priority assignments */
+// Using Mach high-level priority assignments
 int os::java_to_os_priority[CriticalPriority + 1] = {
    0,              // 0 Entry should never be used (MINPRI_USER)
 
@@ -2702,12 +2698,14 @@
   int policy;
   pthread_t self = pthread_self();
 
-  if (pthread_getschedparam(self, &policy, &sp) != 0)
+  if (pthread_getschedparam(self, &policy, &sp) != 0) {
     return OS_ERR;
+  }
 
   sp.sched_priority = newpri;
-  if (pthread_setschedparam(self, policy, &sp) != 0)
+  if (pthread_setschedparam(self, policy, &sp) != 0) {
     return OS_ERR;
+  }
 
   return OS_OK;
 #else
@@ -2763,7 +2761,6 @@
 //      - sends signal to end the sigsuspend loop in the SR_handler
 //
 //  Note that the SR_lock plays no role in this suspend/resume protocol.
-//
 
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
@@ -2775,7 +2772,6 @@
   osthread->set_siginfo(siginfo);
 }
 
-//
 // Handler function invoked when a thread's execution is suspended or
 // resumed. We have to be careful that only async-safe functions are
 // called here (Note: most pthread functions are not async safe and
@@ -2847,21 +2843,21 @@
 static int SR_initialize() {
   struct sigaction act;
   char *s;
-  /* Get signal number to use for suspend/resume */
+  // Get signal number to use for suspend/resume
   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
-        SR_signum = sig;
+      SR_signum = sig;
     }
   }
 
   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
-        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
+         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
 
   sigemptyset(&SR_sigset);
   sigaddset(&SR_sigset, SR_signum);
 
-  /* Set up signal handler for suspend/resume */
+  // Set up signal handler for suspend/resume
   act.sa_flags = SA_RESTART|SA_SIGINFO;
   act.sa_handler = (void (*)(int)) SR_handler;
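A condensed sketch of the handler installation shown here (SIGUSR2 is the default SR_signum in this file; the handler body must stay async-signal-safe, per the comment above SR_handler):

    #include <signal.h>
    #include <cstring>

    static void sr_handler(int sig, siginfo_t* info, void* ucontext) {
      // Async-signal-safe work only: no locks, no malloc, no stdio.
      (void)sig; (void)info; (void)ucontext;
    }

    static int install_sr_handler(int signum /* e.g. SIGUSR2 */) {
      struct sigaction act;
      memset(&act, 0, sizeof(act));
      act.sa_flags = SA_RESTART | SA_SIGINFO;  // same flags as above
      act.sa_sigaction = sr_handler;
      sigemptyset(&act.sa_mask);
      return sigaction(signum, &act, NULL);
    }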
 
@@ -2987,9 +2983,9 @@
 // Note that the VM will print warnings if it detects conflicting signal
 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
 //
-extern "C" JNIEXPORT int
-JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
-                        void* ucontext, int abort_if_unrecognized);
+extern "C" JNIEXPORT int JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
+                                               void* ucontext,
+                                               int abort_if_unrecognized);
 
 void signalHandler(int sig, siginfo_t* info, void* uc) {
   assert(info != NULL && uc != NULL, "it must be old kernel");
@@ -3180,12 +3176,12 @@
     signal_setting_t begin_signal_setting = NULL;
     signal_setting_t end_signal_setting = NULL;
     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
     if (begin_signal_setting != NULL) {
       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
-                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
       libjsig_is_loaded = true;
       assert(UseSignalChaining, "should enable signal-chaining");
     }
@@ -3215,10 +3211,10 @@
     // exception handling, while leaving the standard BSD signal handlers functional.
     kern_return_t kr;
     kr = task_set_exception_ports(mach_task_self(),
-        EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
-        MACH_PORT_NULL,
-        EXCEPTION_STATE_IDENTITY,
-        MACHINE_THREAD_STATE);
+                                  EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
+                                  MACH_PORT_NULL,
+                                  EXCEPTION_STATE_IDENTITY,
+                                  MACHINE_THREAD_STATE);
 
     assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
 #endif
@@ -3255,7 +3251,7 @@
 // We will never set this flag, and we should
 // ignore this flag in our diagnostic
 #ifdef SIGNIFICANT_SIGNAL_MASK
-#undef SIGNIFICANT_SIGNAL_MASK
+  #undef SIGNIFICANT_SIGNAL_MASK
 #endif
 #define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
 
@@ -3314,7 +3310,7 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
+      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
     // It is our signal handler
     // check for flags, reset system-used one!
     if ((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
@@ -3327,9 +3323,12 @@
 }
 
 
-#define DO_SIGNAL_CHECK(sig) \
-  if (!sigismember(&check_signal_done, sig)) \
-    os::Bsd::check_signal_handler(sig)
+#define DO_SIGNAL_CHECK(sig)                      \
+  do {                                            \
+    if (!sigismember(&check_signal_done, sig)) {  \
+      os::Bsd::check_signal_handler(sig);         \
+    }                                             \
+  } while (0)
 
 // This method is a periodic task to check for misbehaving JNI applications
 // under CheckJNI, we can add any periodic checks here
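The do { ... } while (0) wrapper added to DO_SIGNAL_CHECK above is the standard hygiene for multi-statement macros; without it, the macro misparses under if/else (done, check, and skip below are hypothetical names for illustration):

    #define BAD_CHECK(sig)   if (!done(sig)) check(sig)
    #define GOOD_CHECK(sig)  do { if (!done(sig)) check(sig); } while (0)

    // With BAD_CHECK, the 'else' silently binds to the macro's hidden 'if':
    //   if (verbose) BAD_CHECK(s); else skip();
    // runs skip() when verbose && done(s), and never when !verbose.
    // GOOD_CHECK expands to a single statement, so the 'else' binds to
    // 'if (verbose)' as the caller intended.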
@@ -3444,7 +3443,8 @@
   }
 }
 
-extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
+extern void report_error(char* file_name, int line_no, char* title,
+                         char* format, ...);
 
 extern bool signal_name(int signo, char* buf, size_t len);
 
@@ -3462,7 +3462,7 @@
 
 // this is called _before_ the most of global arguments have been parsed
 void os::init(void) {
-  char dummy;   /* used to get a guess on initial stack address */
+  char dummy;   // used to get a guess on initial stack address
 //  first_hrtime = gethrtime();
 
   // With BsdThreads the JavaMain thread pid (primordial thread)
@@ -3515,8 +3515,7 @@
 }
 
 // this is called _after_ the global arguments have been parsed
-jint os::init_2(void)
-{
+jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling
   address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
@@ -3524,8 +3523,10 @@
   os::set_polling_page(polling_page);
 
 #ifndef PRODUCT
-  if (Verbose && PrintMiscellaneous)
-    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
+  if (Verbose && PrintMiscellaneous) {
+    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
+               (intptr_t)polling_page);
+  }
 #endif
 
   if (!UseMembar) {
@@ -3534,8 +3535,10 @@
     os::set_memory_serialize_page(mem_serialize_page);
 
 #ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous)
-      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+    if (Verbose && PrintMiscellaneous) {
+      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
+                 (intptr_t)mem_serialize_page);
+    }
 #endif
   }
 
@@ -3554,22 +3557,22 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
+                                    (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
       threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
-        tty->print_cr("\nThe stack size specified is too small, "
-                      "Specify at least %dk",
-                      os::Bsd::min_stack_allowed/ K);
-        return JNI_ERR;
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  os::Bsd::min_stack_allowed/ K);
+    return JNI_ERR;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
@@ -3577,8 +3580,9 @@
     struct rlimit nbr_files;
     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
     if (status != 0) {
-      if (PrintMiscellaneous && (Verbose || WizardMode))
+      if (PrintMiscellaneous && (Verbose || WizardMode)) {
         perror("os::init_2 getrlimit failed");
+      }
     } else {
       nbr_files.rlim_cur = nbr_files.rlim_max;
 
@@ -3591,8 +3595,9 @@
 
       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
       if (status != 0) {
-        if (PrintMiscellaneous && (Verbose || WizardMode))
+        if (PrintMiscellaneous && (Verbose || WizardMode)) {
           perror("os::init_2 setrlimit failed");
+        }
       }
     }
   }
@@ -3635,16 +3640,17 @@
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
-  if (!guard_memory((char*)_polling_page, Bsd::page_size()))
+  if (!guard_memory((char*)_polling_page, Bsd::page_size())) {
     fatal("Could not disable polling page");
-};
+  }
+}
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
   if (!bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
     fatal("Could not enable polling page");
   }
-};
+}
 
 int os::active_processor_count() {
   return _processor_count;
@@ -3682,12 +3688,12 @@
 
 ///
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -3719,8 +3725,9 @@
   return fetcher.result();
 }
 
-int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
-{
+int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond,
+                                 pthread_mutex_t *_mutex,
+                                 const struct timespec *_abstime) {
   return pthread_cond_timedwait(_cond, _mutex, _abstime);
 }
 
@@ -3734,7 +3741,7 @@
     st->print(PTR_FORMAT ": ", addr);
     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#x", dlinfo.dli_sname,
-                 addr - (intptr_t)dlinfo.dli_saddr);
+                addr - (intptr_t)dlinfo.dli_saddr);
     } else if (dlinfo.dli_fbase != NULL) {
       st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
     } else {
@@ -3757,8 +3764,9 @@
       if (begin < lowest)  begin = lowest;
       Dl_info dlinfo2;
       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
-          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
+          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
         end = (address) dlinfo2.dli_saddr;
+      }
       Disassembler::decode(begin, end, st);
     }
     return true;
@@ -3772,9 +3780,9 @@
 // This does not do anything on Bsd. This is basically a hook for being
 // able to use structured exception handling (thread-local exception filters)
 // on, e.g., Win32.
-void
-os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
-                         JavaCallArguments* args, Thread* thread) {
+void os::os_exception_wrapper(java_call_t f, JavaValue* value,
+                              methodHandle* method, JavaCallArguments* args,
+                              Thread* thread) {
   f(value, method, args, thread);
 }
 
@@ -3815,7 +3823,8 @@
 }
 
 ATTRIBUTE_PRINTF(3, 0)
-int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
+int local_vsnprintf(char* buf, size_t count, const char* format,
+                    va_list args) {
   return ::vsnprintf(buf, count, format, args);
 }
 
@@ -3827,7 +3836,7 @@
   dir = opendir(path);
   if (dir == NULL) return true;
 
-  /* Scan the directory */
+  // Scan the directory
   bool result = true;
   char buf[sizeof(struct dirent) + MAX_PATH];
   while (result && (ptr = ::readdir(dir)) != NULL) {
@@ -3843,7 +3852,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 #ifndef O_DELETE
-#define O_DELETE 0x10000
+  #define O_DELETE 0x10000
 #endif
 
 // Open a file. Unlink the file immediately after open returns
@@ -3851,7 +3860,6 @@
 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
 
 int os::open(const char *path, int oflag, int mode) {
-
   if (strlen(path) > MAX_PATH - 1) {
     errno = ENAMETOOLONG;
     return -1;
@@ -3863,7 +3871,7 @@
   fd = ::open(path, oflag, mode);
   if (fd == -1) return -1;
 
-  //If the open succeeded, the file might still be a directory
+  // If the open succeeded, the file might still be a directory
   {
     struct stat buf;
     int ret = ::fstat(fd, &buf);
@@ -3881,34 +3889,34 @@
     }
   }
 
-    /*
-     * All file descriptors that are opened in the JVM and not
-     * specifically destined for a subprocess should have the
-     * close-on-exec flag set.  If we don't set it, then careless 3rd
-     * party native code might fork and exec without closing all
-     * appropriate file descriptors (e.g. as we do in closeDescriptors in
-     * UNIXProcess.c), and this in turn might:
-     *
-     * - cause end-of-file to fail to be detected on some file
-     *   descriptors, resulting in mysterious hangs, or
-     *
-     * - might cause an fopen in the subprocess to fail on a system
-     *   suffering from bug 1085341.
-     *
-     * (Yes, the default setting of the close-on-exec flag is a Unix
-     * design flaw)
-     *
-     * See:
-     * 1085341: 32-bit stdio routines should support file descriptors >255
-     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
-     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
-     */
+  // All file descriptors that are opened in the JVM and not
+  // specifically destined for a subprocess should have the
+  // close-on-exec flag set.  If we don't set it, then careless 3rd
+  // party native code might fork and exec without closing all
+  // appropriate file descriptors (e.g. as we do in closeDescriptors in
+  // UNIXProcess.c), and this in turn might:
+  //
+  // - cause end-of-file to fail to be detected on some file
+  //   descriptors, resulting in mysterious hangs, or
+  //
+  // - might cause an fopen in the subprocess to fail on a system
+  //   suffering from bug 1085341.
+  //
+  // (Yes, the default setting of the close-on-exec flag is a Unix
+  // design flaw)
+  //
+  // See:
+  // 1085341: 32-bit stdio routines should support file descriptors >255
+  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
+  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
+  //
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1) {
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
     }
+  }
 #endif
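The close-on-exec rationale in the comment block above reduces to a two-call fcntl(2) pattern; a standalone sketch (modern code could instead pass O_CLOEXEC to open() directly):

    #include <fcntl.h>

    // Open a file and mark its descriptor close-on-exec so careless
    // fork/exec in native code cannot leak it into child processes.
    static int open_cloexec(const char* path, int oflag) {
      int fd = ::open(path, oflag);
      if (fd != -1) {
        int flags = ::fcntl(fd, F_GETFD);
        if (flags != -1) {
          ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
        }
      }
      return fd;
    }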
 
   if (o_delete != 0) {
@@ -3948,11 +3956,9 @@
   if (::fstat(fd, &buf) >= 0) {
     mode = buf.st_mode;
     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
-      /*
-      * XXX: is the following call interruptible? If so, this might
-      * need to go through the INTERRUPT_IO() wrapper as for other
-      * blocking, interruptible calls in this file.
-      */
+      // XXX: is the following call interruptible? If so, this might
+      // need to go through the INTERRUPT_IO() wrapper as for other
+      // blocking, interruptible calls in this file.
       int n;
       if (::ioctl(fd, FIONREAD, &n) >= 0) {
         *bytes = n;
@@ -3972,23 +3978,24 @@
 }
 
 int os::socket_available(int fd, jint *pbytes) {
-   if (fd < 0)
-     return OS_OK;
-
-   int ret;
-
-   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
-
-   //%% note ioctl can return 0 when successful, JVM_SocketAvailable
-   // is expected to return 0 on failure and 1 on success to the jdk.
-
-   return (ret == OS_ERR) ? 0 : 1;
+  if (fd < 0) {
+    return OS_OK;
+  }
+
+  int ret;
+
+  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
+
+  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
+  // is expected to return 0 on failure and 1 on success to the jdk.
+
+  return (ret == OS_ERR) ? 0 : 1;
 }
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags;
 
@@ -4019,8 +4026,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -4075,8 +4082,9 @@
 
   mach_thread = thread->osthread()->thread_id();
   kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
-  if (kr != KERN_SUCCESS)
+  if (kr != KERN_SUCCESS) {
     return -1;
+  }
 
   if (user_sys_cpu_time) {
     jlong nanos;
@@ -4139,7 +4147,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -4207,7 +4215,8 @@
 // abstime will be the absolute timeout time
 // TODO: replace compute_abstime() with unpackTime()
 
-static struct timespec* compute_abstime(struct timespec* abstime, jlong millis) {
+static struct timespec* compute_abstime(struct timespec* abstime,
+                                        jlong millis) {
   if (millis < 0)  millis = 0;
   struct timeval now;
   int status = gettimeofday(&now, NULL);
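compute_abstime() turns a relative millisecond timeout into the absolute deadline that pthread_cond_timedwait() expects; the core conversion in isolation (a sketch with the overflow clamping omitted):

    #include <sys/time.h>
    #include <ctime>

    static void millis_to_abstime(struct timespec* abst, long millis) {
      struct timeval now;
      gettimeofday(&now, NULL);
      abst->tv_sec  = now.tv_sec + millis / 1000;
      abst->tv_nsec = now.tv_usec * 1000 + (millis % 1000) * 1000000L;
      if (abst->tv_nsec >= 1000000000L) {  // carry nanoseconds into seconds
        abst->tv_nsec -= 1000000000L;
        abst->tv_sec  += 1;
      }
    }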
@@ -4235,28 +4244,28 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        status = pthread_cond_wait(_cond, _mutex);
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        if (status == ETIMEDOUT) { status = EINTR; }
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
+    // Do this the hard way by blocking ...
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      status = pthread_cond_wait(_cond, _mutex);
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      if (status == ETIMEDOUT) { status = EINTR; }
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
 
     _Event = 0;
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
@@ -4269,8 +4278,8 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -4314,7 +4323,7 @@
   }
   --_nParked;
   if (_Event >= 0) {
-     ret = OS_OK;
+    ret = OS_OK;
   }
   _Event = 0;
   status = pthread_mutex_unlock(_mutex);
@@ -4370,36 +4379,33 @@
 // JSR166
 // -------------------------------------------------------
 
-/*
- * The solaris and bsd implementations of park/unpark are fairly
- * conservative for now, but can be improved. They currently use a
- * mutex/condvar pair, plus a count.
- * Park decrements count if > 0, else does a condvar wait.  Unpark
- * sets count to 1 and signals condvar.  Only one thread ever waits
- * on the condvar. Contention seen when trying to park implies that someone
- * is unparking you, so don't wait. And spurious returns are fine, so there
- * is no need to track notifications.
- */
+// The solaris and bsd implementations of park/unpark are fairly
+// conservative for now, but can be improved. They currently use a
+// mutex/condvar pair, plus a count.
+// Park decrements count if > 0, else does a condvar wait.  Unpark
+// sets count to 1 and signals condvar.  Only one thread ever waits
+// on the condvar. Contention seen when trying to park implies that someone
+// is unparking you, so don't wait. And spurious returns are fine, so there
+// is no need to track notifications.
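A minimal sketch of the mutex/condvar/count protocol the comment describes (simplified: no timeout path, and the single permit is a plain int guarded by the mutex):

    #include <pthread.h>

    class MiniParker {
      pthread_mutex_t _mu;
      pthread_cond_t  _cv;
      int             _counter;  // 0 = no permit, 1 = permit available
     public:
      MiniParker() : _counter(0) {
        pthread_mutex_init(&_mu, NULL);
        pthread_cond_init(&_cv, NULL);
      }
      void park() {
        pthread_mutex_lock(&_mu);
        while (_counter == 0) {
          pthread_cond_wait(&_cv, &_mu);  // spurious wakeups rechecked here
        }
        _counter = 0;                     // consume the permit
        pthread_mutex_unlock(&_mu);
      }
      void unpark() {
        pthread_mutex_lock(&_mu);
        _counter = 1;                     // at most one permit, as described
        pthread_cond_signal(&_cv);        // only one thread ever waits
        pthread_mutex_unlock(&_mu);
      }
    };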
 
 #define MAX_SECS 100000000
-/*
- * This code is common to bsd and solaris and will be moved to a
- * common place in dolphin.
- *
- * The passed in time value is either a relative time in nanoseconds
- * or an absolute time in milliseconds. Either way it has to be unpacked
- * into suitable seconds and nanoseconds components and stored in the
- * given timespec structure.
- * Given time is a 64-bit value and the time_t used in the timespec is only
- * a signed-32-bit value (except on 64-bit Bsd) we have to watch for
- * overflow if times way in the future are given. Further on Solaris versions
- * prior to 10 there is a restriction (see cond_timedwait) that the specified
- * number of seconds, in abstime, is less than current_time  + 100,000,000.
- * As it will be 28 years before "now + 100000000" will overflow we can
- * ignore overflow and just impose a hard-limit on seconds using the value
- * of "now + 100,000,000". This places a limit on the timeout of about 3.17
- * years from "now".
- */
+
+// This code is common to bsd and solaris and will be moved to a
+// common place in dolphin.
+//
+// The passed in time value is either a relative time in nanoseconds
+// or an absolute time in milliseconds. Either way it has to be unpacked
+// into suitable seconds and nanoseconds components and stored in the
+// given timespec structure.
+// Given time is a 64-bit value and the time_t used in the timespec is only
+// a signed-32-bit value (except on 64-bit Bsd) we have to watch for
+// overflow if times way in the future are given. Further on Solaris versions
+// prior to 10 there is a restriction (see cond_timedwait) that the specified
+// number of seconds, in abstime, is less than current_time  + 100,000,000.
+// As it will be 28 years before "now + 100000000" will overflow we can
+// ignore overflow and just impose a hard-limit on seconds using the value
+// of "now + 100,000,000". This places a limit on the timeout of about 3.17
+// years from "now".
 
 static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
   assert(time > 0, "convertTime");
@@ -4414,19 +4420,16 @@
     jlong secs = time / 1000;
     if (secs > max_secs) {
       absTime->tv_sec = max_secs;
-    }
-    else {
+    } else {
       absTime->tv_sec = secs;
     }
     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
@@ -4544,17 +4547,17 @@
   const int s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal(_cond);
-        assert(status == 0, "invariant");
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-     } else {
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-        status = pthread_cond_signal(_cond);
-        assert(status == 0, "invariant");
-     }
+    if (WorkAroundNPTLTimedWaitHang) {
+      status = pthread_cond_signal(_cond);
+      assert(status == 0, "invariant");
+      status = pthread_mutex_unlock(_mutex);
+      assert(status == 0, "invariant");
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert(status == 0, "invariant");
+      status = pthread_cond_signal(_cond);
+      assert(status == 0, "invariant");
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert(status == 0, "invariant");
@@ -4562,10 +4565,10 @@
 }
 
 
-/* Darwin has no "environ" in a dynamic library. */
+// Darwin has no "environ" in a dynamic library.
 #ifdef __APPLE__
-#include <crt_externs.h>
-#define environ (*_NSGetEnviron())
+  #include <crt_externs.h>
+  #define environ (*_NSGetEnviron())
 #else
 extern char** environ;
 #endif
@@ -4612,26 +4615,26 @@
     // Wait for the child process to exit.  This returns immediately if
     // the child has already exited.
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
@@ -4646,40 +4649,46 @@
 //
 bool os::is_headless_jre() {
 #ifdef __APPLE__
-    // We no longer build headless-only on Mac OS X
-    return false;
+  // We no longer build headless-only on Mac OS X
+  return false;
 #else
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt" JNI_LIB_SUFFIX;
-    const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt" JNI_LIB_SUFFIX;
+  const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 #endif
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -30,8 +30,8 @@
 // Information about the protection of the page at address '0' on this os.
 static bool zero_page_read_protected() { return true; }
 
-/* pthread_getattr_np comes with BsdThreads-0.9-7 on RedHat 7.1 */
-typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);
+// pthread_getattr_np comes with BsdThreads-0.9-7 on RedHat 7.1
+typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);
 
 #ifdef __APPLE__
 // Mac OS X doesn't support clock_gettime. Stub out the type, it is
@@ -108,7 +108,7 @@
   // that file provides extensions to the os class and not the
   // Bsd class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_bsd_signal, harmlessly.
@@ -147,7 +147,7 @@
   // BsdThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-private:
+ private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   typedef int (*numa_max_node_func_t)(void);
@@ -170,7 +170,7 @@
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
-public:
+ public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -190,55 +190,55 @@
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    volatile int _nParked;
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
-    double PostPad[2];
-    Thread * _Assoc;
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  volatile int _nParked;
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
+  double PostPad[2];
+  Thread * _Assoc;
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformEvent() {
-      int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _Assoc   = NULL;
-    }
+ public:
+  PlatformEvent() {
+    int status;
+    status = pthread_cond_init(_cond, NULL);
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init(_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _Assoc   = NULL;
+  }
 
-    // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    void unpark();
-    int  park(jlong millis);
-    void SetAssociation(Thread * a) { _Assoc = a; }
+  // Use caution with reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  void unpark();
+  int  park(jlong millis);
+  void SetAssociation(Thread * a) { _Assoc = a; }
 };
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
+ protected:
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = pthread_cond_init(_cond, NULL);
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init(_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+  }
 };
 
 #endif // OS_BSD_VM_OS_BSD_HPP
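
The CachePad/PostPad doubles in PlatformEvent above are a manual padding
idiom: surrounding the hot fields with unused storage roughly a cache line
wide lowers the odds that _mutex shares a line with unrelated data and
suffers false sharing. A minimal sketch of the same idea, assuming
64-byte cache lines (this code predates C++11 alignas):

  struct PaddedFlag {
    char pad_pre[64];                 // assumed cache-line size
    volatile int flag;                // the hot field
    char pad_post[64 - sizeof(int)];  // keep the next object off our line
  };
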
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -109,7 +109,7 @@
 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 // getrusage() is prepared to handle the associated failure.
 #ifndef RUSAGE_THREAD
-#define RUSAGE_THREAD   (1)               /* only the calling thread */
+  #define RUSAGE_THREAD   (1)               /* only the calling thread */
 #endif
 
 #define MAX_PATH    (2 * K)
@@ -150,13 +150,13 @@
 
 static pid_t _initial_pid = 0;
 
-/* Signal number used to suspend/resume a thread */
-
-/* do not use any signal number less than SIGSEGV, see 4355769 */
+// Signal number used to suspend/resume a thread
+
+// do not use any signal number less than SIGSEGV, see 4355769
 static int SR_signum = SIGUSR2;
 sigset_t SR_sigset;
 
-/* Used to protect dlsym() calls */
+// Used to protect dlsym() calls
 static pthread_mutex_t dl_mutex;
 
 // Declarations
@@ -240,17 +240,17 @@
 
 #ifndef SYS_gettid
 // i386: 224, ia64: 1105, amd64: 186, sparc 143
-#ifdef __ia64__
-#define SYS_gettid 1105
-#elif __i386__
-#define SYS_gettid 224
-#elif __amd64__
-#define SYS_gettid 186
-#elif __sparc__
-#define SYS_gettid 143
-#else
-#error define gettid for the arch
-#endif
+  #ifdef __ia64__
+    #define SYS_gettid 1105
+  #elif __i386__
+    #define SYS_gettid 224
+  #elif __amd64__
+    #define SYS_gettid 186
+  #elif __sparc__
+    #define SYS_gettid 143
+  #else
+    #error define gettid for the arch
+  #endif
 #endif
 
 // Cpu architecture string
@@ -269,13 +269,13 @@
 #elif defined(PPC64)
 static char cpu_arch[] = "ppc64";
 #elif defined(SPARC)
-#  ifdef _LP64
+  #ifdef _LP64
 static char cpu_arch[] = "sparcv9";
-#  else
+  #else
 static char cpu_arch[] = "sparc";
-#  endif
+  #endif
 #else
-#error Add appropriate cpu_arch setting
+  #error Add appropriate cpu_arch setting
 #endif
 
 
@@ -290,10 +290,10 @@
 pid_t os::Linux::gettid() {
   int rslt = syscall(SYS_gettid);
   if (rslt == -1) {
-     // old kernel, no NPTL support
-     return getpid();
+    // old kernel, no NPTL support
+    return getpid();
   } else {
-     return (pid_t)rslt;
+    return (pid_t)rslt;
   }
 }
 
@@ -350,16 +350,16 @@
   // Important note: if the location of libjvm.so changes this
   // code needs to be changed accordingly.
 
-// See ld(1):
-//      The linker uses the following search paths to locate required
-//      shared libraries:
-//        1: ...
-//        ...
-//        7: The default directories, normally /lib and /usr/lib.
+  // See ld(1):
+  //      The linker uses the following search paths to locate required
+  //      shared libraries:
+  //        1: ...
+  //        ...
+  //        7: The default directories, normally /lib and /usr/lib.
 #if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
-#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
+  #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
 #else
-#define DEFAULT_LIBPATH "/lib:/usr/lib"
+  #define DEFAULT_LIBPATH "/lib:/usr/lib"
 #endif
 
 // Base path of extensions installed on the system.
@@ -465,14 +465,15 @@
 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 
 bool os::Linux::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
+    return true;
+  } else {
+    return false;
+  }
 }
 
 void os::Linux::signal_sets_init() {
@@ -503,23 +504,24 @@
   sigaddset(&unblocked_sigs, SR_signum);
 
   if (!ReduceSignalUsage) {
-   if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
-  if (!ReduceSignalUsage)
+  if (!ReduceSignalUsage) {
     sigaddset(&vm_sigs, BREAK_SIGNAL);
+  }
   debug_only(signal_sets_initialized = true);
 
 }
@@ -574,59 +576,59 @@
   // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
   // generic name for earlier versions.
   // Define macros here so we can build HotSpot on old systems.
-# ifndef _CS_GNU_LIBC_VERSION
-# define _CS_GNU_LIBC_VERSION 2
-# endif
-# ifndef _CS_GNU_LIBPTHREAD_VERSION
-# define _CS_GNU_LIBPTHREAD_VERSION 3
-# endif
+#ifndef _CS_GNU_LIBC_VERSION
+  #define _CS_GNU_LIBC_VERSION 2
+#endif
+#ifndef _CS_GNU_LIBPTHREAD_VERSION
+  #define _CS_GNU_LIBPTHREAD_VERSION 3
+#endif
 
   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n, mtInternal);
-     confstr(_CS_GNU_LIBC_VERSION, str, n);
-     os::Linux::set_glibc_version(str);
+    char *str = (char *)malloc(n, mtInternal);
+    confstr(_CS_GNU_LIBC_VERSION, str, n);
+    os::Linux::set_glibc_version(str);
   } else {
-     // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
-     static char _gnu_libc_version[32];
-     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
-              "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
-     os::Linux::set_glibc_version(_gnu_libc_version);
+    // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
+    static char _gnu_libc_version[32];
+    jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
+                 "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
+    os::Linux::set_glibc_version(_gnu_libc_version);
   }
 
   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n, mtInternal);
-     confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
-     // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
-     // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
-     // is the case. LinuxThreads has a hard limit on max number of threads.
-     // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-     // On the other hand, NPTL does not have such a limit, sysconf()
-     // will return -1 and errno is not changed. Check if it is really NPTL.
-     if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
-         strstr(str, "NPTL") &&
-         sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-       free(str);
-       os::Linux::set_libpthread_version("linuxthreads");
-     } else {
-       os::Linux::set_libpthread_version(str);
-     }
+    char *str = (char *)malloc(n, mtInternal);
+    confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
+    // Vanilla RH-9 (glibc 2.3.2) has a bug where confstr() always tells
+    // us "NPTL-0.29" even when we are running with LinuxThreads. Check if this
+    // is the case. LinuxThreads has a hard limit on max number of threads.
+    // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
+    // On the other hand, NPTL does not have such a limit, sysconf()
+    // will return -1 and errno is not changed. Check if it is really NPTL.
+    if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
+        strstr(str, "NPTL") &&
+        sysconf(_SC_THREAD_THREADS_MAX) > 0) {
+      free(str);
+      os::Linux::set_libpthread_version("linuxthreads");
+    } else {
+      os::Linux::set_libpthread_version(str);
+    }
   } else {
     // glibc before 2.3.2 only has LinuxThreads.
     os::Linux::set_libpthread_version("linuxthreads");
   }
 
   if (strstr(libpthread_version(), "NPTL")) {
-     os::Linux::set_is_NPTL();
+    os::Linux::set_is_NPTL();
   } else {
-     os::Linux::set_is_LinuxThreads();
+    os::Linux::set_is_LinuxThreads();
   }
 
   // LinuxThreads have two flavors: floating-stack mode, which allows variable
   // stack size; and fixed-stack mode. NPTL is always floating-stack.
   if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
-     os::Linux::set_is_floating_stack();
+    os::Linux::set_is_floating_stack();
   }
 }
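
The detection above reduces to two confstr() queries plus a workaround for
a RH-9 misreport. A minimal standalone probe of the same interfaces:

  #include <stdio.h>
  #include <unistd.h>

  int main(void) {
    char buf[64];
    if (confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf)) > 0) {
      printf("libc:       %s\n", buf);   // e.g. "glibc 2.3.4"
    }
    if (confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof(buf)) > 0) {
      printf("libpthread: %s\n", buf);   // "NPTL x.y" on NPTL systems
    }
    return 0;
  }
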
 
@@ -683,9 +685,9 @@
 // should always be true if the function is not inlined.
 
 #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
-#define NOINLINE
+  #define NOINLINE
 #else
-#define NOINLINE __attribute__ ((noinline))
+  #define NOINLINE __attribute__ ((noinline))
 #endif
 
 static void _expand_stack_to(address bottom) NOINLINE;
@@ -832,7 +834,8 @@
   return 0;
 }
 
-bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+bool os::create_thread(Thread* thread, ThreadType thr_type,
+                       size_t stack_size) {
   assert(thread->osthread() == NULL, "caller responsible");
 
   // Allocate the OSThread object
@@ -935,9 +938,9 @@
 
   // Aborted due to thread limit being reached
   if (state == ZOMBIE) {
-      thread->set_osthread(NULL);
-      delete osthread;
-      return false;
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
   }
 
   // The thread is returned suspended (in state INITIALIZED),
@@ -957,7 +960,7 @@
 
 bool os::create_attached_thread(JavaThread* thread) {
 #ifdef ASSERT
-    thread->verify_not_published();
+  thread->verify_not_published();
 #endif
 
   // Allocate the OSThread object
@@ -1029,7 +1032,7 @@
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
-   }
+  }
 
   delete osthread;
 }
@@ -1084,9 +1087,11 @@
          initial_thread_stack_size()   != 0,
          "os::init did not locate initial thread's stack region");
   if ((address)&dummy >= initial_thread_stack_bottom() &&
-      (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
-       return true;
-  else return false;
+      (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size()) {
+    return true;
+  } else {
+    return false;
+  }
 }
 
 // Find the virtual memory area that contains addr
@@ -1097,10 +1102,10 @@
     while (!feof(fp)) {
       if (fscanf(fp, "%p-%p", &low, &high) == 2) {
         if (low <= addr && addr < high) {
-           if (vma_low)  *vma_low  = low;
-           if (vma_high) *vma_high = high;
-           fclose(fp);
-           return true;
+          if (vma_low)  *vma_low  = low;
+          if (vma_high) *vma_high = high;
+          fclose(fp);
+          return true;
         }
       }
       for (;;) {
@@ -1136,8 +1141,9 @@
   //   in case other parts in glibc still assumes 2M max stack size.
   // FIXME: alt signal stack is gone, maybe we can relax this constraint?
   // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
-  if (stack_size > 2 * K * K IA64_ONLY(*2))
-      stack_size = 2 * K * K IA64_ONLY(*2);
+  if (stack_size > 2 * K * K IA64_ONLY(*2)) {
+    stack_size = 2 * K * K IA64_ONLY(*2);
+  }
   // Try to figure out where the stack base (top) is. This is harder.
   //
   // When an application is started, glibc saves the initial stack pointer in
@@ -1221,46 +1227,46 @@
 #define _UFM UINTX_FORMAT
 #define _DFM INTX_FORMAT
 
-        /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
-        /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
+        //                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2
+        //              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8
         i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
-             &state,          /* 3  %c  */
-             &ppid,           /* 4  %d  */
-             &pgrp,           /* 5  %d  */
-             &session,        /* 6  %d  */
-             &nr,             /* 7  %d  */
-             &tpgrp,          /* 8  %d  */
-             &flags,          /* 9  %lu  */
-             &minflt,         /* 10 %lu  */
-             &cminflt,        /* 11 %lu  */
-             &majflt,         /* 12 %lu  */
-             &cmajflt,        /* 13 %lu  */
-             &utime,          /* 14 %lu  */
-             &stime,          /* 15 %lu  */
-             &cutime,         /* 16 %ld  */
-             &cstime,         /* 17 %ld  */
-             &prio,           /* 18 %ld  */
-             &nice,           /* 19 %ld  */
-             &junk,           /* 20 %ld  */
-             &it_real,        /* 21 %ld  */
-             &start,          /* 22 UINTX_FORMAT */
-             &vsize,          /* 23 UINTX_FORMAT */
-             &rss,            /* 24 INTX_FORMAT  */
-             &rsslim,         /* 25 UINTX_FORMAT */
-             &scodes,         /* 26 UINTX_FORMAT */
-             &ecode,          /* 27 UINTX_FORMAT */
-             &stack_start);   /* 28 UINTX_FORMAT */
+                   &state,          // 3  %c
+                   &ppid,           // 4  %d
+                   &pgrp,           // 5  %d
+                   &session,        // 6  %d
+                   &nr,             // 7  %d
+                   &tpgrp,          // 8  %d
+                   &flags,          // 9  %lu
+                   &minflt,         // 10 %lu
+                   &cminflt,        // 11 %lu
+                   &majflt,         // 12 %lu
+                   &cmajflt,        // 13 %lu
+                   &utime,          // 14 %lu
+                   &stime,          // 15 %lu
+                   &cutime,         // 16 %ld
+                   &cstime,         // 17 %ld
+                   &prio,           // 18 %ld
+                   &nice,           // 19 %ld
+                   &junk,           // 20 %ld
+                   &it_real,        // 21 %ld
+                   &start,          // 22 UINTX_FORMAT
+                   &vsize,          // 23 UINTX_FORMAT
+                   &rss,            // 24 INTX_FORMAT
+                   &rsslim,         // 25 UINTX_FORMAT
+                   &scodes,         // 26 UINTX_FORMAT
+                   &ecode,          // 27 UINTX_FORMAT
+                   &stack_start);   // 28 UINTX_FORMAT
       }
 
 #undef _UFM
 #undef _DFM
 
       if (i != 28 - 2) {
-         assert(false, "Bad conversion from /proc/self/stat");
-         // product mode - assume we are the initial thread, good luck in the
-         // embedded case.
-         warning("Can't detect initial thread stack location - bad conversion");
-         stack_start = (uintptr_t) &rlim;
+        assert(false, "Bad conversion from /proc/self/stat");
+        // product mode - assume we are the initial thread, good luck in the
+        // embedded case.
+        warning("Can't detect initial thread stack location - bad conversion");
+        stack_start = (uintptr_t) &rlim;
       }
     } else {
       // For some reason we can't open /proc/self/stat (for example, running on
@@ -1298,9 +1304,9 @@
   stack_top = align_size_up(stack_top, page_size());
 
   if (max_size && stack_size > max_size) {
-     _initial_thread_stack_size = max_size;
+    _initial_thread_stack_size = max_size;
   } else {
-     _initial_thread_stack_size = stack_size;
+    _initial_thread_stack_size = stack_size;
   }
 
   _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
@@ -1348,7 +1354,7 @@
 }
 
 #ifndef CLOCK_MONOTONIC
-#define CLOCK_MONOTONIC (1)
+  #define CLOCK_MONOTONIC (1)
 #endif
 
 void os::Linux::clock_init() {
@@ -1391,17 +1397,15 @@
 }
 
 #ifndef SYS_clock_getres
-
-#if defined(IA32) || defined(AMD64)
-#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
-#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
+  #if defined(IA32) || defined(AMD64)
+    #define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
+    #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
+  #else
+    #warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
+    #define sys_clock_getres(x,y)  -1
+  #endif
 #else
-#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
-#define sys_clock_getres(x,y)  -1
-#endif
-
-#else
-#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
+  #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
 #endif
 
 void os::Linux::fast_thread_clock_init() {
@@ -1423,9 +1427,8 @@
   // better than 1 sec. This is extra check for reliability.
 
   if (pthread_getcpuclockid_func &&
-     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
-     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
-
+      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
+      sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
     _supports_fast_thread_cpu_time = true;
     _pthread_getcpuclockid = pthread_getcpuclockid_func;
   }
@@ -1558,7 +1561,6 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
   if (errno == 0)  return 0;
 
   const char *s = ::strerror(errno);
@@ -1769,9 +1771,9 @@
   int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
 
   if (rslt) {
-     // buf already contains library name
-     if (offset) *offset = addr - data.base;
-     return true;
+    // buf already contains library name
+    if (offset) *offset = addr - data.base;
+    return true;
   }
   if (dladdr((void*)addr, &dlinfo) != 0) {
     if (dlinfo.dli_fname != NULL) {
@@ -1788,9 +1790,9 @@
   return false;
 }
 
-  // Loads .dll/.so and
-  // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+// Loads .dll/.so and,
+// in case of error, checks whether the .dll/.so was built for the
+// same architecture as the HotSpot that is running
 
 
 // Remember the stack's state. The Linux dynamic linker will change
@@ -1818,8 +1820,7 @@
   void* loaded_library() { return _lib; }
 };
 
-void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
-{
+void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   void * result = NULL;
   bool load_attempted = false;
 
@@ -1905,7 +1906,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
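
The architecture check that follows reads only the ELF header and never
maps the library. A sketch of the core probe (error handling trimmed;
Elf32_Ehdr is fine here because e_ident and e_machine sit at the same
offsets in 32- and 64-bit headers):

  #include <elf.h>
  #include <fcntl.h>
  #include <unistd.h>

  static int elf_machine(const char *path) {
    Elf32_Ehdr ehdr;
    int fd = open(path, O_RDONLY);
    if (fd < 0) return -1;
    ssize_t n = read(fd, &ehdr, sizeof(ehdr));
    close(fd);
    if (n != (ssize_t)sizeof(ehdr)) return -1;
    return ehdr.e_machine;   // compare against EM_386, EM_X86_64, ...
  }
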
@@ -1921,9 +1922,9 @@
     char*       name;         // String representation
   } arch_t;
 
-  #ifndef EM_486
+#ifndef EM_486
   #define EM_486          6               /* Intel 80486 */
-  #endif
+#endif
 
   static const arch_t arch_array[]={
     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
@@ -1948,38 +1949,38 @@
     {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
   };
 
-  #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
-  #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
-  #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
-  #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
-  #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
-  #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
-  #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
-  #elif  (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
-  #elif  (defined S390)
-    static  Elf32_Half running_arch_code=EM_S390;
-  #elif  (defined ALPHA)
-    static  Elf32_Half running_arch_code=EM_ALPHA;
-  #elif  (defined MIPSEL)
-    static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
-  #elif  (defined PARISC)
-    static  Elf32_Half running_arch_code=EM_PARISC;
-  #elif  (defined MIPS)
-    static  Elf32_Half running_arch_code=EM_MIPS;
-  #elif  (defined M68K)
-    static  Elf32_Half running_arch_code=EM_68K;
-  #else
+#if  (defined IA32)
+  static  Elf32_Half running_arch_code=EM_386;
+#elif   (defined AMD64)
+  static  Elf32_Half running_arch_code=EM_X86_64;
+#elif  (defined IA64)
+  static  Elf32_Half running_arch_code=EM_IA_64;
+#elif  (defined __sparc) && (defined _LP64)
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
+#elif  (defined __sparc) && (!defined _LP64)
+  static  Elf32_Half running_arch_code=EM_SPARC;
+#elif  (defined __powerpc64__)
+  static  Elf32_Half running_arch_code=EM_PPC64;
+#elif  (defined __powerpc__)
+  static  Elf32_Half running_arch_code=EM_PPC;
+#elif  (defined ARM)
+  static  Elf32_Half running_arch_code=EM_ARM;
+#elif  (defined S390)
+  static  Elf32_Half running_arch_code=EM_S390;
+#elif  (defined ALPHA)
+  static  Elf32_Half running_arch_code=EM_ALPHA;
+#elif  (defined MIPSEL)
+  static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
+#elif  (defined PARISC)
+  static  Elf32_Half running_arch_code=EM_PARISC;
+#elif  (defined MIPS)
+  static  Elf32_Half running_arch_code=EM_MIPS;
+#elif  (defined M68K)
+  static  Elf32_Half running_arch_code=EM_68K;
+#else
    #error Method os::dll_load requires that one of the following is defined:\
          IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
-  #endif
+#endif
 
  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
@@ -1998,7 +1999,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -2020,20 +2021,21 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
   return NULL;
 }
 
-void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
+void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
+                                int ebuflen) {
   void * result = ::dlopen(filename, RTLD_LAZY);
   if (result == NULL) {
     ::strncpy(ebuf, ::dlerror(), ebuflen - 1);
@@ -2042,7 +2044,8 @@
   return result;
 }
 
-void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, int ebuflen) {
+void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
+                                       int ebuflen) {
   void * result = NULL;
   if (LoadExecStackDllInVMThread) {
     result = dlopen_helper(filename, ebuf, ebuflen);
@@ -2074,11 +2077,10 @@
   return result;
 }
 
-/*
- * glibc-2.0 libdl is not MT safe.  If you are building with any glibc,
- * chances are you might want to run the generated bits against glibc-2.0
- * libdl.so, so always use locking for any version of glibc.
- */
+// glibc-2.0 libdl is not MT safe.  If you are building with any glibc,
+// chances are you might want to run the generated bits against glibc-2.0
+// libdl.so, so always use locking for any version of glibc.
+//
 void* os::dll_lookup(void* handle, const char* name) {
   pthread_mutex_lock(&dl_mutex);
   void* res = dlsym(handle, name);
@@ -2093,7 +2095,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -2108,16 +2110,16 @@
 }
 
 void os::print_dll_info(outputStream *st) {
-   st->print_cr("Dynamic libraries:");
-
-   char fname[32];
-   pid_t pid = os::Linux::gettid();
-
-   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
-
-   if (!_print_ascii_file(fname, st)) {
-     st->print("Can not get library information for pid = %d\n", pid);
-   }
+  st->print_cr("Dynamic libraries:");
+
+  char fname[32];
+  pid_t pid = os::Linux::gettid();
+
+  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
+
+  if (!_print_ascii_file(fname, st)) {
+    st->print("Can not get library information for pid = %d\n", pid);
+  }
 }
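
Printing the dynamic libraries on Linux is just streaming /proc/<pid>/maps,
where every mapped object appears one region per line. A self-contained
sketch of the same trick:

  #include <stdio.h>

  static void dump_maps(void) {
    char buf[4096];
    FILE *f = fopen("/proc/self/maps", "r");
    if (f == NULL) return;            // e.g. /proc not mounted
    size_t n;
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
      fwrite(buf, 1, n, stdout);
    }
    fclose(f);
  }
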
 
 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
@@ -2207,28 +2209,28 @@
 // an informative string like "6.0.6" or "wheezy/sid". Because of this
 // "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-   if (!_print_ascii_file("/etc/oracle-release", st) &&
-       !_print_ascii_file("/etc/mandriva-release", st) &&
-       !_print_ascii_file("/etc/mandrake-release", st) &&
-       !_print_ascii_file("/etc/sun-release", st) &&
-       !_print_ascii_file("/etc/redhat-release", st) &&
-       !_print_ascii_file("/etc/lsb-release", st) &&
-       !_print_ascii_file("/etc/SuSE-release", st) &&
-       !_print_ascii_file("/etc/turbolinux-release", st) &&
-       !_print_ascii_file("/etc/gentoo-release", st) &&
-       !_print_ascii_file("/etc/ltib-release", st) &&
-       !_print_ascii_file("/etc/angstrom-version", st) &&
-       !_print_ascii_file("/etc/system-release", st) &&
-       !_print_ascii_file("/etc/os-release", st)) {
-
-       if (file_exists("/etc/debian_version")) {
-         st->print("Debian ");
-         _print_ascii_file("/etc/debian_version", st);
-       } else {
-         st->print("Linux");
-       }
-   }
-   st->cr();
+  if (!_print_ascii_file("/etc/oracle-release", st) &&
+      !_print_ascii_file("/etc/mandriva-release", st) &&
+      !_print_ascii_file("/etc/mandrake-release", st) &&
+      !_print_ascii_file("/etc/sun-release", st) &&
+      !_print_ascii_file("/etc/redhat-release", st) &&
+      !_print_ascii_file("/etc/lsb-release", st) &&
+      !_print_ascii_file("/etc/SuSE-release", st) &&
+      !_print_ascii_file("/etc/turbolinux-release", st) &&
+      !_print_ascii_file("/etc/gentoo-release", st) &&
+      !_print_ascii_file("/etc/ltib-release", st) &&
+      !_print_ascii_file("/etc/angstrom-version", st) &&
+      !_print_ascii_file("/etc/system-release", st) &&
+      !_print_ascii_file("/etc/os-release", st)) {
+
+    if (file_exists("/etc/debian_version")) {
+      st->print("Debian ");
+      _print_ascii_file("/etc/debian_version", st);
+    } else {
+      st->print("Linux");
+    }
+  }
+  st->cr();
 }
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -2237,15 +2239,15 @@
   st->print("%s ", os::Linux::glibc_version());
   st->print("%s ", os::Linux::libpthread_version());
   if (os::Linux::is_LinuxThreads()) {
-     st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
+    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
   }
   st->cr();
 }
 
 void os::Linux::print_full_memory_info(outputStream* st) {
-   st->print("\n/proc/meminfo:\n");
-   _print_ascii_file("/proc/meminfo", st);
-   st->cr();
+  st->print("\n/proc/meminfo:\n");
+  _print_ascii_file("/proc/meminfo", st);
+  st->cr();
 }
 
 void os::print_memory_info(outputStream* st) {
@@ -2335,15 +2337,16 @@
 
   char dli_fname[MAXPATHLEN];
   bool ret = dll_address_to_library_name(
-                CAST_FROM_FN_PTR(address, os::jvm_path),
-                dli_fname, sizeof(dli_fname), NULL);
+                                         CAST_FROM_FN_PTR(address, os::jvm_path),
+                                         dli_fname, sizeof(dli_fname), NULL);
   assert(ret, "cannot locate libjvm");
   char *rp = NULL;
   if (ret && dli_fname[0] != '\0') {
     rp = realpath(dli_fname, buf);
   }
-  if (rp == NULL)
+  if (rp == NULL) {
     return;
+  }
 
   if (Arguments::sun_java_launcher_is_altjvm()) {
     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
@@ -2371,8 +2374,9 @@
         assert(strstr(p, "/libjvm") == p, "invalid library name");
 
         rp = realpath(java_home_var, buf);
-        if (rp == NULL)
+        if (rp == NULL) {
           return;
+        }
 
         // determine if this is a legacy image or modules image
         // modules image doesn't have "jre" subdirectory
@@ -2391,8 +2395,9 @@
         } else {
           // Go back to path of .so
           rp = realpath(dli_fname, buf);
-          if (rp == NULL)
+          if (rp == NULL) {
             return;
+          }
         }
       }
     }
@@ -2414,18 +2419,18 @@
 
 static volatile jint sigint_count = 0;
 
-static void
-UserHandler(int sig, void *siginfo, void *context) {
+static void UserHandler(int sig, void *siginfo, void *context) {
   // 4511530 - sem_post is serialized and handled by the manager thread. When
   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
   // don't want to flood the manager thread with sem_post requests.
-  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
-      return;
+  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) {
+    return;
+  }
 
   // Ctrl-C is pressed during error reporting, likely because the error
   // handler fails to abort. Let VM die immediately.
   if (sig == SIGINT && is_error_reported()) {
-     os::die();
+    os::die();
   }
 
   os::signal_notify(sig);
@@ -2436,15 +2441,15 @@
 }
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    sem_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  sem_t _semaphore;
 };
 
 Semaphore::Semaphore() {
@@ -2523,10 +2528,8 @@
   ::raise(signal_number);
 }
 
-/*
- * The following code is moved from os.cpp for making this
- * code platform specific, which it is by its very nature.
- */
+// The following code was moved from os.cpp to make it platform
+// specific, which it is by its very nature.
 
 // Will be modified when max signal is changed to be dynamic
 int os::sigexitnum_pd() {
@@ -2577,12 +2580,10 @@
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
       if (threadIsSuspended) {
-        //
         // The semaphore has been incremented, but while we were waiting
         // another thread suspended us. We don't want to continue running
         // while suspended because that would surprise the thread that
         // suspended us.
-        //
         ::sem_post(&sig_sem);
 
         thread->java_suspend_self();
@@ -2695,7 +2696,7 @@
 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
-                                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   if (res != (uintptr_t) MAP_FAILED) {
     if (UseNUMAInterleaving) {
       numa_make_global(addr, size);
@@ -2730,12 +2731,12 @@
 
 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
 #ifndef MAP_HUGETLB
-#define MAP_HUGETLB 0x40000
+  #define MAP_HUGETLB 0x40000
 #endif
 
 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
 #ifndef MADV_HUGEPAGE
-#define MADV_HUGEPAGE 14
+  #define MADV_HUGEPAGE 14
 #endif
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
@@ -2803,7 +2804,7 @@
   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
 }
 
-bool os::numa_topology_changed()   { return false; }
+bool os::numa_topology_changed() { return false; }
 
 size_t os::numa_get_groups_num() {
   int max_node = Linux::numa_max_node();
@@ -2832,7 +2833,8 @@
   return false;
 }
 
-char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
+char *os::scan_pages(char *start, char* end, page_info* page_expected,
+                     page_info* page_found) {
   return end;
 }
 
@@ -2842,17 +2844,17 @@
   int retval = -1;
 
 #if defined(IA32)
-# ifndef SYS_getcpu
-# define SYS_getcpu 318
-# endif
+  #ifndef SYS_getcpu
+    #define SYS_getcpu 318
+  #endif
   retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
 #elif defined(AMD64)
 // Unfortunately we have to bring all these macros here from vsyscall.h
 // to be able to compile on old linuxes.
-# define __NR_vgetcpu 2
-# define VSYSCALL_START (-10UL << 20)
-# define VSYSCALL_SIZE 1024
-# define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
+  #define __NR_vgetcpu 2
+  #define VSYSCALL_START (-10UL << 20)
+  #define VSYSCALL_SIZE 1024
+  #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
   typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
   vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
   retval = vgetcpu(&cpu, NULL, NULL);
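
Everything above reimplements sched_getcpu() for kernels and libcs that
predate it; where glibc 2.6+ is available the same answer is one call
away:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void) {
    int cpu = sched_getcpu();   // returns -1 with errno set on failure
    if (cpu >= 0) {
      printf("running on cpu %d\n", cpu);
    }
    return 0;
  }
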
@@ -2885,8 +2887,10 @@
                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
 
   // If it's not, try a direct syscall.
-  if (sched_getcpu() == -1)
-    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
+  if (sched_getcpu() == -1) {
+    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
+                                    (void*)&sched_getcpu_syscall));
+  }
 
   if (sched_getcpu() != -1) { // Does it work?
     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
@@ -2900,9 +2904,9 @@
       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
-                                            libnuma_dlsym(handle, "numa_interleave_memory")));
+                                                libnuma_dlsym(handle, "numa_interleave_memory")));
       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
-                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
+                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
 
 
       if (numa_available() != -1) {
@@ -2974,12 +2978,11 @@
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
-                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res  != (uintptr_t) MAP_FAILED;
 }
 
-static
-address get_stack_commited_bottom(address bottom, size_t size) {
+static address get_stack_commited_bottom(address bottom, size_t size) {
   address nbot = bottom;
   address ntop = bottom + size;
 
@@ -3053,7 +3056,6 @@
 // mapping. This only affects the main/initial thread
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-
   if (os::Linux::is_initial_thread()) {
     // As we manually grow stack up to bottom inside create_attached_thread(),
     // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
@@ -3065,8 +3067,8 @@
     if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
       // Fallback to slow path on all errors, including EAGAIN
       stack_extent = (uintptr_t) get_stack_commited_bottom(
-                                    os::Linux::initial_thread_stack_bottom(),
-                                    (size_t)addr - stack_extent);
+                                                           os::Linux::initial_thread_stack_bottom(),
+                                                           (size_t)addr - stack_extent);
     }
 
     if (stack_extent < (uintptr_t)addr) {
@@ -3139,7 +3141,7 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                         size_t alignment_hint) {
+                            size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
@@ -3190,7 +3192,8 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
-bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
+                                                    size_t page_size) {
   bool result = false;
   void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                  MAP_ANONYMOUS|MAP_PRIVATE,
@@ -3245,20 +3248,19 @@
   return result;
 }
 
-/*
-* Set the coredump_filter bits to include largepages in core dump (bit 6)
-*
-* From the coredump_filter documentation:
-*
-* - (bit 0) anonymous private memory
-* - (bit 1) anonymous shared memory
-* - (bit 2) file-backed private memory
-* - (bit 3) file-backed shared memory
-* - (bit 4) ELF header pages in file-backed private memory areas (it is
-*           effective only if the bit 2 is cleared)
-* - (bit 5) hugetlb private memory
-* - (bit 6) hugetlb shared memory
-*/
+// Set the coredump_filter bits to include largepages in core dump (bit 6)
+//
+// From the coredump_filter documentation:
+//
+// - (bit 0) anonymous private memory
+// - (bit 1) anonymous shared memory
+// - (bit 2) file-backed private memory
+// - (bit 3) file-backed shared memory
+// - (bit 4) ELF header pages in file-backed private memory areas (it is
+//           effective only if the bit 2 is cleared)
+// - (bit 5) hugetlb private memory
+// - (bit 6) hugetlb shared memory
+//
 static void set_coredump_filter(void) {
   FILE *f;
   long cdm;
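
For context, the bits above live in /proc/<pid>/coredump_filter as a hex
mask, so enabling hugetlb shared memory in dumps means reading the mask,
setting bit 6, and writing it back. A minimal sketch of that round trip
(the HotSpot function continuing below does the same with its own error
handling):

  #include <stdio.h>

  static void enable_hugetlb_in_core_dumps(void) {
    FILE *f = fopen("/proc/self/coredump_filter", "r+");
    if (f == NULL) return;                    // kernel lacks coredump_filter
    long mask = 0;
    if (fscanf(f, "%lx", &mask) == 1) {
      rewind(f);
      fprintf(f, "%#lx", mask | (1L << 6));   // bit 6: hugetlb shared memory
    }
    fclose(f);
  }
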
@@ -3330,8 +3332,8 @@
 
   if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
     warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
-        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
-        proper_unit_for_byte_size(large_page_size));
+            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+            proper_unit_for_byte_size(large_page_size));
   }
 
   return large_page_size;
@@ -3411,10 +3413,11 @@
 }
 
 #ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
+  #define SHM_HUGETLB 04000
 #endif
 
-char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
+                                            char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -3430,33 +3433,32 @@
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(UseSHM) ||
-                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
-                        );
+                         !FLAG_IS_DEFAULT(LargePageSizeInBytes));
   char msg[128];
 
   // Create a large shared memory region to attach to based on size.
   // Currently, size is the total size of the heap
   int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
-     // Possible reasons for shmget failure:
-     // 1. shmmax is too small for Java heap.
-     //    > check shmmax value: cat /proc/sys/kernel/shmmax
-     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
-     // 2. not enough large page memory.
-     //    > check available large pages: cat /proc/meminfo
-     //    > increase amount of large pages:
-     //          echo new_value > /proc/sys/vm/nr_hugepages
-     //      Note 1: different Linux may use different name for this property,
-     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
-     //      Note 2: it's possible there's enough physical memory available but
-     //            they are so fragmented after a long run that they can't
-     //            coalesce into large pages. Try to reserve large pages when
-     //            the system is still "fresh".
-     if (warn_on_failure) {
-       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-       warning("%s", msg);
-     }
-     return NULL;
+    // Possible reasons for shmget failure:
+    // 1. shmmax is too small for Java heap.
+    //    > check shmmax value: cat /proc/sys/kernel/shmmax
+    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+    // 2. not enough large page memory.
+    //    > check available large pages: cat /proc/meminfo
+    //    > increase amount of large pages:
+    //          echo new_value > /proc/sys/vm/nr_hugepages
+    //      Note 1: different Linux distributions may use different names
+    //            for this property, e.g. on Redhat AS-3 it is "hugetlb_pool".
+    //      Note 2: it's possible there's enough physical memory available but
+    //            it is so fragmented after a long run that it can't
+    //            coalesce into large pages. Try to reserve large pages when
+    //            the system is still "fresh".
+    if (warn_on_failure) {
+      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
+      warning("%s", msg);
+    }
+    return NULL;
   }
 
   // attach to the region
@@ -3470,17 +3472,18 @@
   shmctl(shmid, IPC_RMID, NULL);
 
   if ((intptr_t)addr == -1) {
-     if (warn_on_failure) {
-       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-       warning("%s", msg);
-     }
-     return NULL;
+    if (warn_on_failure) {
+      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
+      warning("%s", msg);
+    }
+    return NULL;
   }
 
   return addr;
 }
 
-static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
+                                        int error) {
   assert(error == ENOMEM, "Only expect to fail if no memory is available");
 
   bool warn_on_failure = UseLargePages &&
@@ -3491,12 +3494,14 @@
   if (warn_on_failure) {
     char msg[128];
     jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
-        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
     warning("%s", msg);
   }
 }
 
-char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
+                                                        char* req_addr,
+                                                        bool exec) {
   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
   assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
@@ -3516,7 +3521,10 @@
   return addr;
 }
 
-char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
+                                                         size_t alignment,
+                                                         char* req_addr,
+                                                         bool exec) {
   size_t large_page_size = os::large_page_size();
 
   assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
@@ -3596,9 +3604,9 @@
   }
 
   if (lp_end != end) {
-      result = ::mmap(lp_end, end - lp_end, prot,
-                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-                      -1, 0);
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
     if (result == MAP_FAILED) {
       ::munmap(start, lp_end - start);
       return NULL;
@@ -3608,7 +3616,10 @@
   return start;
 }
 
-char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
+                                                   size_t alignment,
+                                                   char* req_addr,
+                                                   bool exec) {
   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
   assert(is_ptr_aligned(req_addr, alignment), "Must be");
   assert(is_power_of_2(alignment), "Must be");
@@ -3622,7 +3633,8 @@
   }
 }
 
-char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment,
+                                 char* req_addr, bool exec) {
   assert(UseLargePages, "only for large pages");
 
   char* addr;
@@ -3732,12 +3744,12 @@
   // if kernel honors the hint then we can return immediately.
   char * addr = anon_mmap(requested_addr, bytes, false);
   if (addr == requested_addr) {
-     return requested_addr;
+    return requested_addr;
   }
 
   if (addr != NULL) {
-     // mmap() is successful but it fails to reserve at the requested address
-     anon_munmap(addr, bytes);
+    // mmap() is successful but it fails to reserve at the requested address
+    anon_munmap(addr, bytes);
   }
 
   int i;
@@ -3792,7 +3804,6 @@
   return ::read(fd, buf, nBytes);
 }
 
-//
 // Short sleep, direct OS call.
 //
 // Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
@@ -3811,8 +3822,7 @@
   req.tv_sec = 0;
   if (ms > 0) {
     req.tv_nsec = (ms % 1000) * 1000000;
-  }
-  else {
+  } else {
     req.tv_nsec = 1;
   }
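
The request built above feeds nanosleep(), which a signal can cut short
with EINTR, leaving the time remaining in its second argument. The same
pattern as a self-contained retry loop (a sketch, not the HotSpot
routine):

  #include <errno.h>
  #include <time.h>

  static void sleep_millis(long ms) {
    struct timespec req;
    req.tv_sec  = ms / 1000;
    req.tv_nsec = (ms % 1000) * 1000000L;
    if (req.tv_sec == 0 && req.tv_nsec == 0) req.tv_nsec = 1;  // never 0
    while (nanosleep(&req, &req) == -1 && errno == EINTR) {
      // req now holds the unslept remainder; go around again
    }
  }
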
 
@@ -3899,7 +3909,8 @@
   return (ret == 0) ? OS_OK : OS_ERR;
 }
 
-OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
+OSReturn os::get_native_priority(const Thread* const thread,
+                                 int *priority_ptr) {
   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
     *priority_ptr = java_to_os_priority[NormPriority];
     return OS_OK;
@@ -3936,19 +3947,18 @@
 //      - sends signal to end the sigsuspend loop in the SR_handler
 //
 //  Note that the SR_lock plays no role in this suspend/resume protocol.
-//
 
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
   osthread->set_siginfo(NULL);
 }
 
-static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
+static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo,
+                                 ucontext_t* context) {
   osthread->set_ucontext(context);
   osthread->set_siginfo(siginfo);
 }
 
-//
 // Handler function invoked when a thread's execution is suspended or
 // resumed. We have to be careful that only async-safe functions are
 // called here (Note: most pthread functions are not async safe and
@@ -4018,21 +4028,21 @@
 static int SR_initialize() {
   struct sigaction act;
   char *s;
-  /* Get signal number to use for suspend/resume */
+  // Get signal number to use for suspend/resume
   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < _NSIG) {
-        SR_signum = sig;
+      SR_signum = sig;
     }
   }
 
   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
-        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
+         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
 
   sigemptyset(&SR_sigset);
   sigaddset(&SR_sigset, SR_signum);
 
-  /* Set up signal handler for suspend/resume */
+  // Set up signal handler for suspend/resume
   act.sa_flags = SA_RESTART|SA_SIGINFO;
   act.sa_handler = (void (*)(int)) SR_handler;
 
@@ -4158,9 +4168,10 @@
 // Note that the VM will print warnings if it detects conflicting signal
 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
 //
-extern "C" JNIEXPORT int
-JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
-                        void* ucontext, int abort_if_unrecognized);
+extern "C" JNIEXPORT int JVM_handle_linux_signal(int signo,
+                                                 siginfo_t* siginfo,
+                                                 void* ucontext,
+                                                 int abort_if_unrecognized);
 
 void signalHandler(int sig, siginfo_t* info, void* uc) {
   assert(info != NULL && uc != NULL, "it must be old kernel");
@@ -4338,12 +4349,12 @@
     signal_setting_t begin_signal_setting = NULL;
     signal_setting_t end_signal_setting = NULL;
     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
     if (begin_signal_setting != NULL) {
       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
-                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
       libjsig_is_loaded = true;
       assert(UseSignalChaining, "should enable signal-chaining");
     }
@@ -4409,7 +4420,7 @@
 // We will never set this flag, and we should
 // ignore this flag in our diagnostic
 #ifdef SIGNIFICANT_SIGNAL_MASK
-#undef SIGNIFICANT_SIGNAL_MASK
+  #undef SIGNIFICANT_SIGNAL_MASK
 #endif
 #define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
 
@@ -4468,7 +4479,7 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
+      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
     // It is our signal handler
     // check for flags, reset system-used one!
     if ((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
@@ -4481,15 +4492,17 @@
 }
 
 
-#define DO_SIGNAL_CHECK(sig) \
-  if (!sigismember(&check_signal_done, sig)) \
-    os::Linux::check_signal_handler(sig)
+#define DO_SIGNAL_CHECK(sig)                      \
+  do {                                            \
+    if (!sigismember(&check_signal_done, sig)) {  \
+      os::Linux::check_signal_handler(sig);       \
+    }                                             \
+  } while (0)
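
The do { ... } while (0) wrapper introduced in this hunk is the standard way to make a multi-statement macro behave as a single statement, so a call followed by a semicolon nests safely under an un-braced if/else. A small illustration with a hypothetical macro:

    #include <stdio.h>

    // Without the do/while(0), the expansion would be two statements and
    // the 'else' below would no longer attach to the 'if'.
    #define LOG_TWICE(msg)       \
      do {                       \
        printf("%s\n", (msg));   \
        printf("%s\n", (msg));   \
      } while (0)

    int main() {
      bool verbose = true;
      if (verbose)
        LOG_TWICE("checked");    // expands to one statement; the ';' fits
      else
        printf("quiet\n");
      return 0;
    }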
 
 // This method is a periodic task to check for misbehaving JNI applications
 // under CheckJNI; we can add any periodic checks here.
 
 void os::run_periodic_checks() {
-
   if (check_signals == false) return;
 
   // SEGV and BUS if overridden could potentially prevent
@@ -4600,7 +4613,8 @@
   }
 }
 
-extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
+extern void report_error(char* file_name, int line_no, char* title,
+                         char* format, ...);
 
 extern bool signal_name(int signo, char* buf, size_t len);
 
@@ -4618,7 +4632,7 @@
 
 // this is called _before_ most of the global arguments have been parsed
 void os::init(void) {
-  char dummy;   /* used to get a guess on initial stack address */
+  char dummy;   // used to get a guess on initial stack address
 //  first_hrtime = gethrtime();
 
   // With LinuxThreads the JavaMain thread pid (primordial thread)
@@ -4691,8 +4705,7 @@
 }
 
 // this is called _after_ the global arguments have been parsed
-jint os::init_2(void)
-{
+jint os::init_2(void) {
   Linux::fast_thread_clock_init();
 
   // Allocate a single page and mark it as readable for safepoint polling
@@ -4702,8 +4715,10 @@
   os::set_polling_page(polling_page);
 
 #ifndef PRODUCT
-  if (Verbose && PrintMiscellaneous)
-    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
+  if (Verbose && PrintMiscellaneous) {
+    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
+               (intptr_t)polling_page);
+  }
 #endif
 
   if (!UseMembar) {
@@ -4712,8 +4727,10 @@
     os::set_memory_serialize_page(mem_serialize_page);
 
 #ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous)
-      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+    if (Verbose && PrintMiscellaneous) {
+    tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n",
+                 (intptr_t)mem_serialize_page);
+    }
 #endif
   }
 
@@ -4732,22 +4749,22 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
-                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
+                                      (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+                                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
       threadStackSizeInBytes < os::Linux::min_stack_allowed) {
-        tty->print_cr("\nThe stack size specified is too small, "
-                      "Specify at least %dk",
-                      os::Linux::min_stack_allowed/ K);
-        return JNI_ERR;
+    tty->print_cr("\nThe stack size specified is too small; "
+                  "specify at least %dk",
+                  os::Linux::min_stack_allowed / K);
+    return JNI_ERR;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
 
@@ -4757,9 +4774,9 @@
 
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
-     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
-          Linux::glibc_version(), Linux::libpthread_version(),
-          Linux::is_floating_stack() ? "floating stack" : "fixed stack");
+    tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
+                  Linux::glibc_version(), Linux::libpthread_version(),
+                  Linux::is_floating_stack() ? "floating stack" : "fixed stack");
   }
 
   if (UseNUMA) {
@@ -4801,14 +4818,16 @@
     struct rlimit nbr_files;
     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
     if (status != 0) {
-      if (PrintMiscellaneous && (Verbose || WizardMode))
+      if (PrintMiscellaneous && (Verbose || WizardMode)) {
         perror("os::init_2 getrlimit failed");
+      }
     } else {
       nbr_files.rlim_cur = nbr_files.rlim_max;
       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
       if (status != 0) {
-        if (PrintMiscellaneous && (Verbose || WizardMode))
+        if (PrintMiscellaneous && (Verbose || WizardMode)) {
           perror("os::init_2 setrlimit failed");
+        }
       }
     }
   }
@@ -4854,16 +4873,17 @@
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
-  if (!guard_memory((char*)_polling_page, Linux::page_size()))
+  if (!guard_memory((char*)_polling_page, Linux::page_size())) {
     fatal("Could not disable polling page");
-};
+  }
+}
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
   if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
     fatal("Could not enable polling page");
   }
-};
+}
 
 int os::active_processor_count() {
  // Linux doesn't yet have an (official) notion of processor sets,
@@ -4899,12 +4919,12 @@
 }
 
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -4936,19 +4956,20 @@
   return fetcher.result();
 }
 
-int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
-{
-   if (is_NPTL()) {
-      return pthread_cond_timedwait(_cond, _mutex, _abstime);
-   } else {
-      // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
-      // word back to default 64bit precision if condvar is signaled. Java
-      // wants 53bit precision.  Save and restore current value.
-      int fpu = get_fpu_control_word();
-      int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-      set_fpu_control_word(fpu);
-      return status;
-   }
+int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond,
+                                   pthread_mutex_t *_mutex,
+                                   const struct timespec *_abstime) {
+  if (is_NPTL()) {
+    return pthread_cond_timedwait(_cond, _mutex, _abstime);
+  } else {
+    // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
+    // word back to default 64bit precision if condvar is signaled. Java
+    // wants 53bit precision.  Save and restore current value.
+    int fpu = get_fpu_control_word();
+    int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
+    set_fpu_control_word(fpu);
+    return status;
+  }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -4961,7 +4982,7 @@
     st->print(PTR_FORMAT ": ", addr);
     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#x", dlinfo.dli_sname,
-                 addr - (intptr_t)dlinfo.dli_saddr);
+                addr - (intptr_t)dlinfo.dli_saddr);
     } else if (dlinfo.dli_fbase != NULL) {
       st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
     } else {
@@ -4984,8 +5005,9 @@
       if (begin < lowest)  begin = lowest;
       Dl_info dlinfo2;
       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
-          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
+          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
         end = (address) dlinfo2.dli_saddr;
+      }
       Disassembler::decode(begin, end, st);
     }
     return true;
@@ -5041,7 +5063,8 @@
   return true;
 }
 
-int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
+int local_vsnprintf(char* buf, size_t count, const char* format,
+                    va_list args) {
   return ::vsnprintf(buf, count, format, args);
 }
 
@@ -5053,7 +5076,7 @@
   dir = opendir(path);
   if (dir == NULL) return true;
 
-  /* Scan the directory */
+  // Scan the directory
   bool result = true;
   char buf[sizeof(struct dirent) + MAX_PATH];
   while (result && (ptr = ::readdir(dir)) != NULL) {
@@ -5069,7 +5092,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 #ifndef O_DELETE
-#define O_DELETE 0x10000
+  #define O_DELETE 0x10000
 #endif
 
 // Open a file. Unlink the file immediately after open returns
@@ -5077,7 +5100,6 @@
 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
 
 int os::open(const char *path, int oflag, int mode) {
-
   if (strlen(path) > MAX_PATH - 1) {
     errno = ENAMETOOLONG;
     return -1;
@@ -5107,34 +5129,34 @@
     }
   }
 
-    /*
-     * All file descriptors that are opened in the JVM and not
-     * specifically destined for a subprocess should have the
-     * close-on-exec flag set.  If we don't set it, then careless 3rd
-     * party native code might fork and exec without closing all
-     * appropriate file descriptors (e.g. as we do in closeDescriptors in
-     * UNIXProcess.c), and this in turn might:
-     *
-     * - cause end-of-file to fail to be detected on some file
-     *   descriptors, resulting in mysterious hangs, or
-     *
-     * - might cause an fopen in the subprocess to fail on a system
-     *   suffering from bug 1085341.
-     *
-     * (Yes, the default setting of the close-on-exec flag is a Unix
-     * design flaw)
-     *
-     * See:
-     * 1085341: 32-bit stdio routines should support file descriptors >255
-     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
-     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
-     */
+  // All file descriptors that are opened in the JVM and not
+  // specifically destined for a subprocess should have the
+  // close-on-exec flag set.  If we don't set it, then careless 3rd
+  // party native code might fork and exec without closing all
+  // appropriate file descriptors (e.g. as we do in closeDescriptors in
+  // UNIXProcess.c), and this in turn might:
+  //
+  // - cause end-of-file to fail to be detected on some file
+  //   descriptors, resulting in mysterious hangs, or
+  //
+  // - might cause an fopen in the subprocess to fail on a system
+  //   suffering from bug 1085341.
+  //
+  // (Yes, the default setting of the close-on-exec flag is a Unix
+  // design flaw)
+  //
+  // See:
+  // 1085341: 32-bit stdio routines should support file descriptors >255
+  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
+  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
+  //
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1) {
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
     }
+  }
 #endif
 
   if (o_delete != 0) {
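
One caveat the comment block above implies but does not state: setting FD_CLOEXEC with fcntl() after open() leaves a window in which another thread can fork and exec before the flag lands. Where the kernel and libc provide O_CLOEXEC, the flag can be set atomically at open time; a hedged sketch with a fallback (O_CLOEXEC availability is the assumption here):

    #include <fcntl.h>

    // Open with close-on-exec applied atomically when O_CLOEXEC exists;
    // otherwise fall back to the two-step fcntl() dance used above.
    static int open_cloexec(const char* path, int oflag, int mode) {
    #ifdef O_CLOEXEC
      return ::open(path, oflag | O_CLOEXEC, mode);
    #else
      int fd = ::open(path, oflag, mode);
      if (fd >= 0) {
        int flags = ::fcntl(fd, F_GETFD);
        if (flags != -1) {
          ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
        }
      }
      return fd;
    #endif
    }
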
@@ -5174,11 +5196,9 @@
   if (::fstat64(fd, &buf64) >= 0) {
     mode = buf64.st_mode;
     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
-      /*
-      * XXX: is the following call interruptible? If so, this might
-      * need to go through the INTERRUPT_IO() wrapper as for other
-      * blocking, interruptible calls in this file.
-      */
+      // XXX: is the following call interruptible? If so, this might
+      // need to go through the INTERRUPT_IO() wrapper as for other
+      // blocking, interruptible calls in this file.
       int n;
       if (::ioctl(fd, FIONREAD, &n) >= 0) {
         *bytes = n;
@@ -5208,8 +5228,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags = MAP_PRIVATE;
 
@@ -5238,8 +5258,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -5304,10 +5324,7 @@
   }
 }
 
-//
 //  -1 on error.
-//
-
 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
   pid_t  tid = thread->osthread()->thread_id();
   char *s;
@@ -5394,7 +5411,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -5505,28 +5522,28 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        status = pthread_cond_wait(_cond, _mutex);
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        if (status == ETIME) { status = EINTR; }
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
+    // Do this the hard way by blocking ...
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      status = pthread_cond_wait(_cond, _mutex);
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      if (status == ETIME) { status = EINTR; }
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
 
     _Event = 0;
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
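
The v = _Event; Atomic::cmpxchg(v-1, &_Event, v) loops reindented above are a lock-free fetch-and-decrement: read the counter, try to swap in the decremented value, and retry on contention. The same shape in portable C++, with std::atomic standing in for HotSpot's Atomic class:

    #include <atomic>

    // Atomically decrement 'event' and return the value it held before the
    // decrement; compare_exchange_weak refreshes 'v' on failure, so the
    // loop simply retries until no other thread races in between.
    static int fetch_and_decrement(std::atomic<int>& event) {
      int v = event.load();
      while (!event.compare_exchange_weak(v, v - 1)) {
        // retry with the refreshed 'v'
      }
      return v;
    }

std::atomic's fetch_sub(1) would collapse this to one call; the explicit loop mirrors the structure of the code above.
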
@@ -5539,8 +5556,8 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -5584,7 +5601,7 @@
   }
   --_nParked;
   if (_Event >= 0) {
-     ret = OS_OK;
+    ret = OS_OK;
   }
   _Event = 0;
   status = pthread_mutex_unlock(_mutex);
@@ -5640,35 +5657,31 @@
 // JSR166
 // -------------------------------------------------------
 
-/*
- * The solaris and linux implementations of park/unpark are fairly
- * conservative for now, but can be improved. They currently use a
- * mutex/condvar pair, plus a a count.
- * Park decrements count if > 0, else does a condvar wait.  Unpark
- * sets count to 1 and signals condvar.  Only one thread ever waits
- * on the condvar. Contention seen when trying to park implies that someone
- * is unparking you, so don't wait. And spurious returns are fine, so there
- * is no need to track notifications.
- */
-
-/*
- * This code is common to linux and solaris and will be moved to a
- * common place in dolphin.
- *
- * The passed in time value is either a relative time in nanoseconds
- * or an absolute time in milliseconds. Either way it has to be unpacked
- * into suitable seconds and nanoseconds components and stored in the
- * given timespec structure.
- * Given time is a 64-bit value and the time_t used in the timespec is only
- * a signed-32-bit value (except on 64-bit Linux) we have to watch for
- * overflow if times way in the future are given. Further on Solaris versions
- * prior to 10 there is a restriction (see cond_timedwait) that the specified
- * number of seconds, in abstime, is less than current_time  + 100,000,000.
- * As it will be 28 years before "now + 100000000" will overflow we can
- * ignore overflow and just impose a hard-limit on seconds using the value
- * of "now + 100,000,000". This places a limit on the timeout of about 3.17
- * years from "now".
- */
+// The solaris and linux implementations of park/unpark are fairly
+// conservative for now, but can be improved. They currently use a
+// mutex/condvar pair, plus a count.
+// Park decrements count if > 0, else does a condvar wait.  Unpark
+// sets count to 1 and signals condvar.  Only one thread ever waits
+// on the condvar. Contention seen when trying to park implies that someone
+// is unparking you, so don't wait. And spurious returns are fine, so there
+// is no need to track notifications.
+
+// This code is common to linux and solaris and will be moved to a
+// common place in dolphin.
+//
+// The passed in time value is either a relative time in nanoseconds
+// or an absolute time in milliseconds. Either way it has to be unpacked
+// into suitable seconds and nanoseconds components and stored in the
+// given timespec structure.
+// Given time is a 64-bit value and the time_t used in the timespec is only
+// a signed-32-bit value (except on 64-bit Linux) we have to watch for
+// overflow if times far in the future are given. Further, on Solaris versions
+// prior to 10 there is a restriction (see cond_timedwait) that the specified
+// number of seconds, in abstime, is less than current_time + 100,000,000.
+// As it will be 28 years before "now + 100000000" will overflow we can
+// ignore overflow and just impose a hard-limit on seconds using the value
+// of "now + 100,000,000". This places a limit on the timeout of about 3.17
+// years from "now".
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert(time > 0, "convertTime");
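
A hedged sketch of the unpacking the comment block describes: relative nanoseconds or absolute milliseconds go into a timespec, with seconds clamped to now + 100,000,000. The typedef and constant names are illustrative stand-ins for the VM's own:

    #include <sys/time.h>

    typedef long long jlong;  // the VM's 64-bit type
    static const jlong NANOSECS_PER_SEC = 1000000000;
    static const jlong MILLIUNITS       = 1000;

    static void unpack_time(timespec* absTime, bool isAbsolute, jlong time) {
      struct timeval now;
      gettimeofday(&now, NULL);
      jlong max_secs = now.tv_sec + 100000000;  // the ~3.17-year horizon
      if (isAbsolute) {
        jlong secs = time / MILLIUNITS;         // ms since the epoch
        absTime->tv_sec  = (secs > max_secs) ? max_secs : secs;
        absTime->tv_nsec = (secs > max_secs)
                           ? 0 : (time % MILLIUNITS) * 1000000;
      } else {
        jlong secs  = now.tv_sec + time / NANOSECS_PER_SEC;
        jlong nsecs = now.tv_usec * 1000 + time % NANOSECS_PER_SEC;
        if (nsecs >= NANOSECS_PER_SEC) {        // carry into seconds
          secs  += 1;
          nsecs -= NANOSECS_PER_SEC;
        }
        absTime->tv_sec  = (secs > max_secs) ? max_secs : secs;
        absTime->tv_nsec = nsecs;
      }
    }
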
@@ -5839,14 +5852,14 @@
     if (_cur_index != -1) {
       // thread is definitely parked
       if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (&_cond[_cur_index]);
+        status = pthread_cond_signal(&_cond[_cur_index]);
         assert(status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
         assert(status == 0, "invariant");
       } else {
         status = pthread_mutex_unlock(_mutex);
         assert(status == 0, "invariant");
-        status = pthread_cond_signal (&_cond[_cur_index]);
+        status = pthread_cond_signal(&_cond[_cur_index]);
         assert(status == 0, "invariant");
       }
     } else {
@@ -5863,11 +5876,11 @@
 extern char** environ;
 
 #ifndef __NR_fork
-#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
+  #define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
 #endif
 
 #ifndef __NR_execve
-#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
+  #define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
 #endif
 
 // Run the specified command in a separate process. Return its exit value,
@@ -5883,7 +5896,7 @@
   // On IA64 there's no fork syscall, we have to use fork() and hope for
   // the best...
   pid_t pid = NOT_IA64(syscall(__NR_fork);)
-              IA64_ONLY(fork();)
+  IA64_ONLY(fork();)
 
   if (pid < 0) {
     // fork failed
@@ -5914,26 +5927,26 @@
     // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
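
The 0x80-plus-signal convention used above is the same one Unix shells report; extracted into a hedged helper (the name is illustrative), the status decoding reads:

    #include <sys/wait.h>

    // Map a raw waitpid() status onto a shell-style exit value: the exit
    // code for a normal exit, 0x80 + signal number for a signal death, and
    // the raw status for anything else.
    static int decode_exit_status(int status) {
      if (WIFEXITED(status))   return WEXITSTATUS(status);
      if (WIFSIGNALED(status)) return 0x80 + WTERMSIG(status);
      return status;
    }
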
@@ -5947,37 +5960,43 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt.so";
-    const char *new_xawtstr = "/libawt_xawt.so";
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *new_xawtstr = "/libawt_xawt.so";
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 }
 
 // Get the default path to the core file
@@ -6052,14 +6071,13 @@
   }
 }
 
-//
 // See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
 //
 void MemNotifyThread::start() {
-  int    fd;
-  fd = open ("/dev/mem_notify", O_RDONLY, 0);
+  int fd;
+  fd = open("/dev/mem_notify", O_RDONLY, 0);
   if (fd < 0) {
-      return;
+    return;
   }
 
   if (memnotify_thread() == NULL) {
@@ -6074,12 +6092,12 @@
 
 #ifndef PRODUCT
 
-#define test_log(...) \
-  do {\
-    if (VerboseInternalVMTests) { \
-      tty->print_cr(__VA_ARGS__); \
-      tty->flush(); \
-    }\
+#define test_log(...)              \
+  do {                             \
+    if (VerboseInternalVMTests) {  \
+      tty->print_cr(__VA_ARGS__);  \
+      tty->flush();                \
+    }                              \
   } while (false)
 
 class TestReserveMemorySpecial : AllStatic {
@@ -6123,11 +6141,11 @@
 
   static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
     if (!UseHugeTLBFS) {
-        return;
+      return;
     }
 
     test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
-        size, alignment);
+             size, alignment);
 
     assert(size >= os::large_page_size(), "Incorrect input to test");
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -27,8 +27,8 @@
 
 // Linux_OS defines the interface to Linux operating systems
 
-/* pthread_getattr_np comes with LinuxThreads-0.9-7 on RedHat 7.1 */
-typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);
+// pthread_getattr_np comes with LinuxThreads-0.9-7 on RedHat 7.1
+typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);
 
 // Information about the protection of the page at address '0' on this os.
 static bool zero_page_read_protected() { return true; }
@@ -151,7 +151,7 @@
   // that file provides extensions to the os class and not the
   // Linux class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_linux_signal, harmlessly.
@@ -222,10 +222,10 @@
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
  // pthread_cond clock support
-  private:
+ private:
   static pthread_condattr_t _condattr[1];
 
-  public:
+ public:
   static pthread_condattr_t* condAttr() { return _condattr; }
 
   // Stack repair handling
@@ -235,7 +235,7 @@
   // LinuxThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-private:
+ private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   typedef int (*numa_max_node_func_t)(void);
@@ -262,7 +262,7 @@
   static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
-public:
+ public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -287,63 +287,63 @@
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    volatile int _nParked;
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
-    double PostPad[2];
-    Thread * _Assoc;
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  volatile int _nParked;
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
+  double PostPad[2];
+  Thread * _Assoc;
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformEvent() {
-      int status;
-      status = pthread_cond_init (_cond, os::Linux::condAttr());
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _Assoc   = NULL;
-    }
+ public:
+  PlatformEvent() {
+    int status;
+    status = pthread_cond_init(_cond, os::Linux::condAttr());
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init(_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _Assoc   = NULL;
+  }
 
-    // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    void unpark();
-    int  park(jlong millis); // relative timed-wait only
-    void SetAssociation(Thread * a) { _Assoc = a; }
+  // Use caution with reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  void unpark();
+  int  park(jlong millis); // relative timed-wait only
+  void SetAssociation(Thread * a) { _Assoc = a; }
 };
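
On the CachePad/PostPad doubles above: they exist only to reduce false sharing, pushing the mutex and event word onto their own cache line. A hedged modern equivalent, assuming the common 64-byte line size:

    #include <pthread.h>

    // alignas(64) starts the struct on a cache-line boundary and the
    // trailing pad keeps the next heap object off the same line; 64 bytes
    // matches typical x86 hardware but is not guaranteed everywhere.
    struct alignas(64) PaddedEvent {
      pthread_mutex_t mutex;
      volatile int    event;
      char            pad[64];
    };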
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    enum {
-        REL_INDEX = 0,
-        ABS_INDEX = 1
-    };
-    int _cur_index;  // which cond is in use: -1, 0, 1
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[2]; // one for relative times and one for abs.
+ protected:
+  enum {
+    REL_INDEX = 0,
+    ABS_INDEX = 1
+  };
+  int _cur_index;  // which cond is in use: -1, 0, 1
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[2]; // one for relative times and one for abs.
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
-      assert_status(status == 0, status, "cond_init rel");
-      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
-      assert_status(status == 0, status, "cond_init abs");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _cur_index = -1; // mark as unused
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = pthread_cond_init(&_cond[REL_INDEX], os::Linux::condAttr());
+    assert_status(status == 0, status, "cond_init rel");
+    status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
+    assert_status(status == 0, status, "cond_init abs");
+    status = pthread_mutex_init(_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _cur_index = -1; // mark as unused
+  }
 };
 
 #endif // OS_LINUX_VM_OS_LINUX_HPP
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -124,17 +124,17 @@
 // compile on older systems without this header file.
 
 #ifndef MADV_ACCESS_LWP
-# define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
+  #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 #endif
 #ifndef MADV_ACCESS_MANY
-# define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
+  #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 #endif
 
 #ifndef LGRP_RSRC_CPU
-# define LGRP_RSRC_CPU           0       /* CPU resources */
+  #define LGRP_RSRC_CPU      0       /* CPU resources */
 #endif
 #ifndef LGRP_RSRC_MEM
-# define LGRP_RSRC_MEM           1       /* memory resources */
+  #define LGRP_RSRC_MEM      1       /* memory resources */
 #endif
 
 // see thr_setprio(3T) for the basis of these numbers
@@ -190,7 +190,7 @@
 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 
 #ifndef PRODUCT
-#define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
+  #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 
 int ThreadLocalStorage::_tcacheHit = 0;
 int ThreadLocalStorage::_tcacheMiss = 0;
@@ -200,7 +200,7 @@
   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 }
-#undef _PCT
+  #undef _PCT
 #endif // PRODUCT
 
 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
@@ -210,8 +210,8 @@
     address sp = os::current_stack_pointer();
     guarantee(thread->_stack_base == NULL ||
               (sp <= thread->_stack_base &&
-                 sp >= thread->_stack_base - thread->_stack_size) ||
-               is_error_reported(),
+              sp >= thread->_stack_base - thread->_stack_size) ||
+              is_error_reported(),
               "sp must be inside of selected thread stack");
 
     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
@@ -332,7 +332,7 @@
 
 static int _processors_online = 0;
 
-         jint os::Solaris::_os_thread_limit = 0;
+jint os::Solaris::_os_thread_limit = 0;
 volatile jint os::Solaris::_os_thread_count = 0;
 
 julong os::available_memory() {
@@ -346,7 +346,7 @@
 julong os::Solaris::_physical_memory = 0;
 
 julong os::physical_memory() {
-   return Solaris::physical_memory();
+  return Solaris::physical_memory();
 }
 
 static hrtime_t first_hrtime = 0;
@@ -356,8 +356,9 @@
 
 void os::Solaris::initialize_system_info() {
   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
-  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
-  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
+  _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
+  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
+                     (julong)sysconf(_SC_PAGESIZE);
 }
 
 int os::active_processor_count() {
@@ -432,14 +433,14 @@
     next += 1;
   }
   if (found < *id_length) {
-      // The loop above didn't identify the expected number of processors.
-      // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
-      // and re-running the loop, above, but there's no guarantee of progress
-      // if the system configuration is in flux.  Instead, we just return what
-      // we've got.  Note that in the worst case find_processors_online() could
-      // return an empty set.  (As a fall-back in the case of the empty set we
-      // could just return the ID of the current processor).
-      *id_length = found;
+    // The loop above didn't identify the expected number of processors.
+    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
+    // and re-running the loop, above, but there's no guarantee of progress
+    // if the system configuration is in flux.  Instead, we just return what
+    // we've got.  Note that in the worst case find_processors_online() could
+    // return an empty set.  (As a fall-back in the case of the empty set we
+    // could just return the ID of the current processor).
+    *id_length = found;
   }
 
   return true;
@@ -556,9 +557,8 @@
 
 bool os::getenv(const char* name, char* buffer, int len) {
   char* val = ::getenv(name);
-  if (val == NULL
-  ||   strlen(val) + 1  >  len ) {
-    if (len > 0)  buffer[0] = 0; // return a null string
+  if (val == NULL || strlen(val) + 1 > len) {
+    if (len > 0) buffer[0] = 0; // return a null string
     return false;
   }
   strcpy(buffer, val);
@@ -780,8 +780,7 @@
   BREAKPOINT;
 }
 
-bool os::obsolete_option(const JavaVMOption *option)
-{
+bool os::obsolete_option(const JavaVMOption *option) {
   if (!strncmp(option->optionString, "-Xt", 3)) {
     return true;
   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
@@ -906,7 +905,6 @@
 }
 
 void os::Solaris::hotspot_sigmask(Thread* thread) {
-
  // Save caller's signal mask
   sigset_t sigmask;
   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
@@ -932,7 +930,7 @@
 #endif
   OSThread* osthread = create_os_thread(thread, thr_self());
   if (osthread == NULL) {
-     return false;
+    return false;
   }
 
   // Initial thread state is RUNNABLE
@@ -952,9 +950,9 @@
 #endif
   if (_starting_thread == NULL) {
     _starting_thread = create_os_thread(thread, main_thread);
-     if (_starting_thread == NULL) {
-        return false;
-     }
+    if (_starting_thread == NULL) {
+      return false;
+    }
   }
 
  // The primordial thread is runnable from the start
@@ -970,7 +968,8 @@
 }
 
 
-bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+bool os::create_thread(Thread* thread, ThreadType thr_type,
+                       size_t stack_size) {
   // Allocate the OSThread object
   OSThread* osthread = new OSThread(NULL, NULL);
   if (osthread == NULL) {
@@ -980,27 +979,27 @@
   if (ThreadPriorityVerbose) {
     char *thrtyp;
     switch (thr_type) {
-      case vm_thread:
-        thrtyp = (char *)"vm";
-        break;
-      case cgc_thread:
-        thrtyp = (char *)"cgc";
-        break;
-      case pgc_thread:
-        thrtyp = (char *)"pgc";
-        break;
-      case java_thread:
-        thrtyp = (char *)"java";
-        break;
-      case compiler_thread:
-        thrtyp = (char *)"compiler";
-        break;
-      case watcher_thread:
-        thrtyp = (char *)"watcher";
-        break;
-      default:
-        thrtyp = (char *)"unknown";
-        break;
+    case vm_thread:
+      thrtyp = (char *)"vm";
+      break;
+    case cgc_thread:
+      thrtyp = (char *)"cgc";
+      break;
+    case pgc_thread:
+      thrtyp = (char *)"pgc";
+      break;
+    case java_thread:
+      thrtyp = (char *)"java";
+      break;
+    case compiler_thread:
+      thrtyp = (char *)"compiler";
+      break;
+    case watcher_thread:
+      thrtyp = (char *)"watcher";
+      break;
+    default:
+      thrtyp = (char *)"unknown";
+      break;
     }
     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
   }
@@ -1088,14 +1087,14 @@
   return true;
 }
 
-/* defined for >= Solaris 10. This allows builds on earlier versions
- *  of Solaris to take advantage of the newly reserved Solaris JVM signals
- *  With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
- *  and -XX:+UseAltSigs does nothing since these should have no conflict
- */
+// defined for >= Solaris 10. This allows builds on earlier versions
+// of Solaris to take advantage of the newly reserved Solaris JVM signals
+// With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
+// and -XX:+UseAltSigs does nothing since these should have no conflict
+//
 #if !defined(SIGJVM1)
-#define SIGJVM1 39
-#define SIGJVM2 40
+  #define SIGJVM1 39
+  #define SIGJVM2 40
 #endif
 
 debug_only(static bool signal_sets_initialized = false);
@@ -1104,14 +1103,15 @@
 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
 
 bool os::Solaris::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
+    return true;
+  } else {
+    return false;
+  }
 }
 
 // Note: SIGRTMIN is a macro that calls sysconf() so it will
@@ -1158,23 +1158,24 @@
   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
 
   if (!ReduceSignalUsage) {
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
-  if (!ReduceSignalUsage)
+  if (!ReduceSignalUsage) {
     sigaddset(&vm_sigs, BREAK_SIGNAL);
+  }
   debug_only(signal_sets_initialized = true);
 
   // For diagnostics only used in run_periodic_checks
@@ -1244,21 +1245,20 @@
     assert(stack_size > 0, "Stack size calculation problem");
 
     if (stack_size > jt->stack_size()) {
-      NOT_PRODUCT(
-        struct rlimit limits;
-        getrlimit(RLIMIT_STACK, &limits);
-        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
-        assert(size >= jt->stack_size(), "Stack size problem in main thread");
-      )
-      tty->print_cr(
-        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
-        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
-        "See limit(1) to increase the stack size limit.",
-        stack_size / K, jt->stack_size() / K);
+#ifndef PRODUCT
+      struct rlimit limits;
+      getrlimit(RLIMIT_STACK, &limits);
+      size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
+      assert(size >= jt->stack_size(), "Stack size problem in main thread");
+#endif
+      tty->print_cr("Stack size of %d KB exceeds current limit of %d KB.\n"
+                    "(Stack sizes are rounded up to a multiple of the system page size.)\n"
+                    "See limit(1) to increase the stack size limit.",
+                    stack_size / K, jt->stack_size() / K);
       vm_exit(1);
     }
     assert(jt->stack_size() >= stack_size,
-          "Attempt to map more stack than was allocated");
+           "Attempt to map more stack than was allocated");
     jt->set_stack_size(stack_size);
   }
 
@@ -1281,7 +1281,7 @@
   // The main thread must take the VMThread down synchronously
   // before the main thread exits and frees up CodeHeap
   guarantee((Thread::current()->osthread() == osthread
-     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
+             || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
   if (Thread::current()->osthread() == osthread) {
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
@@ -1325,32 +1325,35 @@
   //           JavaThread in Java code, and have stubs simply
   //           treat %g2 as a caller-save register, preserving it in a %lN.
   thread_key_t tk;
-  if (thr_keycreate( &tk, NULL))
+  if (thr_keycreate(&tk, NULL)) {
     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                   "(%s)", strerror(errno)));
+  }
   return int(tk);
 }
 
 void os::free_thread_local_storage(int index) {
   // %%% don't think we need anything here
-  // if ( pthread_key_delete((pthread_key_t) tk) )
+  // if (pthread_key_delete((pthread_key_t) tk)) {
   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
-}
-
-#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
-                      // small number - point is NO swap space available
+  // }
+}
+
+// libthread's allocation for tsd_common is a version-specific
+// small number - the point is that NO swap space is available
+#define SMALLINT 32
 void os::thread_local_storage_at_put(int index, void* value) {
   // %%% this is used only in threadLocalStorage.cpp
   if (thr_setspecific((thread_key_t)index, value)) {
     if (errno == ENOMEM) {
-       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
-                             "thr_setspecific: out of swap space");
+      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
+                            "thr_setspecific: out of swap space");
     } else {
       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                     "(%s)", strerror(errno)));
     }
   } else {
-      ThreadLocalStorage::set_thread_in_slot((Thread *) value);
+    ThreadLocalStorage::set_thread_in_slot((Thread *) value);
   }
 }
 
@@ -1402,14 +1405,14 @@
 }
 
 jlong os::elapsed_frequency() {
-   return hrtime_hz;
+  return hrtime_hz;
 }
 
 // Return the real, user, and system times in seconds from an
 // arbitrary fixed point in the past.
 bool os::getTimesSecs(double* process_real_time,
-                  double* process_user_time,
-                  double* process_system_time) {
+                      double* process_user_time,
+                      double* process_system_time) {
   struct tms ticks;
   clock_t real_ticks = times(&ticks);
 
@@ -1431,29 +1434,31 @@
 
 bool os::enable_vtime() {
   int fd = ::open("/proc/self/ctl", O_WRONLY);
-  if (fd == -1)
+  if (fd == -1) {
     return false;
+  }
 
   long cmd[] = { PCSET, PR_MSACCT };
   int res = ::write(fd, cmd, sizeof(long) * 2);
   ::close(fd);
-  if (res != sizeof(long) * 2)
+  if (res != sizeof(long) * 2) {
     return false;
-
+  }
   return true;
 }
 
 bool os::vtime_enabled() {
   int fd = ::open("/proc/self/status", O_RDONLY);
-  if (fd == -1)
+  if (fd == -1) {
     return false;
+  }
 
   pstatus_t status;
   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
   ::close(fd);
-  if (res != sizeof(pstatus_t))
+  if (res != sizeof(pstatus_t)) {
     return false;
-
+  }
   return status.pr_flags & PR_MSACCT;
 }
 
@@ -1471,8 +1476,9 @@
 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
 jlong os::javaTimeMillis() {
   timeval t;
-  if (gettimeofday( &t, NULL) == -1)
+  if (gettimeofday(&t, NULL) == -1) {
     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
+  }
   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
 }
 
@@ -1625,7 +1631,7 @@
   return false;
 }
 
-typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
+typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
 static dladdr1_func_type dladdr1_func = NULL;
 
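The dladdr1_func pointer above is resolved at runtime precisely so one binary can run on Solaris releases whose libc lacks dladdr1. A hedged sketch of that lazy lookup; the guard function is illustrative:

    #include <dlfcn.h>

    typedef int (*dladdr1_func_type)(void*, Dl_info*, void**, int);
    static dladdr1_func_type dladdr1_func = NULL;

    // Resolve the symbol once; on systems without dladdr1 the pointer stays
    // NULL and callers fall back to plain dladdr.
    static void resolve_dladdr1() {
      if (dladdr1_func == NULL) {
        dladdr1_func = (dladdr1_func_type) dlsym(RTLD_DEFAULT, "dladdr1");
      }
    }
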
 bool os::dll_address_to_function_name(address addr, char *buf,
@@ -1643,9 +1649,9 @@
     // available even if the vm is built on a machine that does
     // not have dladdr1 support.  Make sure there is a value for
     // RTLD_DL_SYMENT.
-    #ifndef RTLD_DL_SYMENT
-    #define RTLD_DL_SYMENT 1
-    #endif
+#ifndef RTLD_DL_SYMENT
+  #define RTLD_DL_SYMENT 1
+#endif
 #ifdef _LP64
     Elf64_Sym * info;
 #else
@@ -1772,12 +1778,11 @@
   }
 }
 
-  // Loads .dll/.so and
-  // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
-
-void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
-{
+// Loads .dll/.so and, in case of error, checks
+// whether the .dll/.so was built for the same
+// architecture as HotSpot is running on
+
+void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   void * result= ::dlopen(filename, RTLD_LAZY);
   if (result != NULL) {
     // Successful loading
@@ -1808,7 +1813,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
@@ -1837,26 +1842,26 @@
     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
   };
 
-  #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
-  #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
-  #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
-  #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
-  #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
-  #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
-  #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
-  #elif (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
-  #else
-    #error Method os::dll_load requires that one of following is defined:\
-         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
-  #endif
+#if  (defined IA32)
+  static  Elf32_Half running_arch_code=EM_386;
+#elif   (defined AMD64)
+  static  Elf32_Half running_arch_code=EM_X86_64;
+#elif  (defined IA64)
+  static  Elf32_Half running_arch_code=EM_IA_64;
+#elif  (defined __sparc) && (defined _LP64)
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
+#elif  (defined __sparc) && (!defined _LP64)
+  static  Elf32_Half running_arch_code=EM_SPARC;
+#elif  (defined __powerpc64__)
+  static  Elf32_Half running_arch_code=EM_PPC64;
+#elif  (defined __powerpc__)
+  static  Elf32_Half running_arch_code=EM_PPC;
+#elif (defined ARM)
+  static  Elf32_Half running_arch_code=EM_ARM;
+#else
+  #error Method os::dll_load requires that one of the following is defined:\
+       IA32, AMD64, IA64, __sparc, __powerpc__, ARM
+#endif
 
  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
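
For context on the arch_array comparison that follows: the probe reads the ELF header of the library that failed to load and compares its e_machine field with the VM's own architecture code. A minimal hedged sketch of reading that field (error handling trimmed; the function name is illustrative):

    #include <elf.h>
    #include <fcntl.h>
    #include <unistd.h>

    // Return the e_machine of 'path', or -1 if it cannot be read.
    // e_machine sits at the same offset in ELF32 and ELF64 headers, so the
    // Elf32 view suffices for this check.
    static int elf_machine_of(const char* path) {
      Elf32_Ehdr head;
      int fd = ::open(path, O_RDONLY);
      if (fd < 0) return -1;
      ssize_t n = ::read(fd, &head, sizeof(head));
      ::close(fd);
      return (n == (ssize_t) sizeof(head)) ? head.e_machine : -1;
    }
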
@@ -1875,7 +1880,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -1895,13 +1900,13 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
@@ -1929,7 +1934,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -1967,9 +1972,9 @@
 
 void os::Solaris::print_distro_info(outputStream* st) {
   if (!_print_ascii_file("/etc/release", st)) {
-      st->print("Solaris");
-    }
-    st->cr();
+    st->print("Solaris");
+  }
+  st->cr();
 }
 
 void os::Solaris::print_libversion_info(outputStream* st) {
@@ -2068,7 +2073,7 @@
 }
 
 static void print_signal_handler(outputStream* st, int sig,
-                                  char* buf, size_t buflen) {
+                                 char* buf, size_t buflen) {
   struct sigaction sa;
 
   sigaction(sig, NULL, &sa);
@@ -2102,13 +2107,13 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
+      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
     // It is our signal handler
     // check for flags
     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
       st->print(
-        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
-        os::Solaris::get_our_sigflags(sig));
+                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
+                os::Solaris::get_our_sigflags(sig));
     }
   }
   st->cr();
@@ -2232,7 +2237,6 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
   if (errno == 0)  return 0;
 
   const char *s = ::strerror(errno);
@@ -2253,7 +2257,7 @@
     // Ctrl-C is pressed during error reporting, likely because the error
     // handler fails to abort. Let VM die immediately.
     if (sig == SIGINT && is_error_reported()) {
-       os::die();
+      os::die();
     }
 
     os::signal_notify(sig);
@@ -2266,15 +2270,15 @@
 }
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    sema_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  sema_t _semaphore;
 };
 
 
@@ -2327,9 +2331,10 @@
   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
 
-  if (sigaction(signal_number, &sigAct, &oldSigAct))
+  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
     // -1 means registration failed
     return (void *)-1;
+  }
 
   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
 }
@@ -2338,10 +2343,8 @@
   raise(signal_number);
 }
 
-/*
- * The following code is moved from os.cpp for making this
- * code platform specific, which it is by its very nature.
- */
+// The following code is moved from os.cpp for making this
+// code platform specific, which it is by its very nature.
 
 // a counter for each possible signal value
 static int Sigexit = 0;
@@ -2373,13 +2376,13 @@
   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
 
   if (UseSignalChaining) {
-     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
-       * (Maxsignum + 1), mtInternal);
-     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
-     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
-     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
-  }
-  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
+    chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
+                                                   * (Maxsignum + 1), mtInternal);
+    memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
+    preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
+    memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
+  }
+  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
 }
 
@@ -2418,18 +2421,16 @@
       thread->set_suspend_equivalent();
       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
-          ;
+        ;
       assert(ret == 0, "sema_wait() failed");
 
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
       if (threadIsSuspended) {
-        //
         // The semaphore has been incremented, but while we were waiting
         // another thread suspended us. We don't want to continue running
         // while suspended because that would surprise the thread that
         // suspended us.
-        //
         ret = ::sema_post(&sig_sem);
         assert(ret == 0, "sema_post() failed");
 
@@ -2635,37 +2636,37 @@
 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
 // board. An LWP is assigned to one of these groups upon creation.
 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
-     ids[0] = 0;
-     return 1;
-   }
-   int result_size = 0, top = 1, bottom = 0, cur = 0;
-   for (int k = 0; k < size; k++) {
-     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
-                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
-     if (r == -1) {
-       ids[0] = 0;
-       return 1;
-     }
-     if (!r) {
-       // That's a leaf node.
-       assert(bottom <= cur, "Sanity check");
-       // Check if the node has memory
-       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
-                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
-         ids[bottom++] = ids[cur];
-       }
-     }
-     top += r;
-     cur++;
-   }
-   if (bottom == 0) {
-     // Handle a situation, when the OS reports no memory available.
-     // Assume UMA architecture.
-     ids[0] = 0;
-     return 1;
-   }
-   return bottom;
+  if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
+    ids[0] = 0;
+    return 1;
+  }
+  int result_size = 0, top = 1, bottom = 0, cur = 0;
+  for (int k = 0; k < size; k++) {
+    int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
+                                   (Solaris::lgrp_id_t*)&ids[top], size - top);
+    if (r == -1) {
+      ids[0] = 0;
+      return 1;
+    }
+    if (!r) {
+      // That's a leaf node.
+      assert(bottom <= cur, "Sanity check");
+      // Check if the node has memory
+      if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
+                                  NULL, 0, LGRP_RSRC_MEM) > 0) {
+        ids[bottom++] = ids[cur];
+      }
+    }
+    top += r;
+    cur++;
+  }
+  if (bottom == 0) {
+    // Handle the situation when the OS reports no memory available.
+    // Assume UMA architecture.
+    ids[0] = 0;
+    return 1;
+  }
+  return bottom;
 }
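
Note how numa_get_leaf_groups() gives the ids array three simultaneous roles: it is the BFS work queue (cur is the head, top the tail), and nodes accepted as memory-bearing leaves are compacted to its front (bottom counts them). The assert bottom <= cur is what makes the compaction safe: a front slot is only overwritten after it has been dequeued. A self-contained toy version of the same in-place traversal, with a hard-coded tree standing in for the lgrp_children() calls:

    // Toy illustration only; the tree below replaces the lgrp hierarchy.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const std::vector<int> kids[] = {
      {1, 2},     // node 0 has children 1 and 2
      {3, 4},     // node 1 has children 3 and 4
      {}, {}, {}  // nodes 2, 3, 4 are leaves
    };

    size_t collect_leaves(int root, int *ids, size_t size) {
      ids[0] = root;
      size_t top = 1, cur = 0, bottom = 0;
      while (cur < top) {
        size_t n = 0;
        for (int c : kids[ids[cur]]) {
          if (top + n < size) ids[top + n++] = c;   // enqueue children
        }
        if (n == 0) ids[bottom++] = ids[cur];       // leaf: compact to front
        top += n;                                   // bottom <= cur holds, so
        cur++;                                      // no unvisited slot is lost
      }
      return bottom;
    }

    int main() {
      int ids[8];
      size_t n = collect_leaves(0, ids, 8);
      for (size_t i = 0; i < n; i++) printf("%d ", ids[i]);  // prints: 2 3 4
      return 0;
    }
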
 
 // Detect the topology change. Typically happens during CPU plugging-unplugging.
@@ -2727,7 +2728,8 @@
 
 // Scan the pages from start to end until a page different than
 // the one described in the info parameter is encountered.
-char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
+char *os::scan_pages(char *start, char* end, page_info* page_expected,
+                     page_info* page_found) {
   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
@@ -2754,10 +2756,9 @@
           if (outdata[types * i + 1] != page_expected->size) {
             break;
           }
-        } else
-          if (page_expected->size != 0) {
-            break;
-          }
+        } else if (page_expected->size != 0) {
+          break;
+        }
 
         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
           if (outdata[types * i] != page_expected->lgrp_id) {
@@ -2808,11 +2809,13 @@
   return b;
 }
 
-char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
+char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
+                             size_t alignment_hint, bool fixed) {
   char* addr = requested_addr;
   int flags = MAP_PRIVATE | MAP_NORESERVE;
 
-  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
+  assert(!(fixed && (alignment_hint > 0)),
+         "alignment hint meaningless with fixed mmap");
 
   if (fixed) {
     flags |= MAP_FIXED;
@@ -2827,8 +2830,10 @@
   return mmap_chunk(addr, bytes, flags, PROT_NONE);
 }
 
-char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
-  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
+                            size_t alignment_hint) {
+  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
+                                  (requested_addr != NULL));
 
   guarantee(requested_addr == NULL || requested_addr == addr,
             "OS failed to return requested mmap address.");
@@ -3093,7 +3098,8 @@
   }
 }
 
-bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
+bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes,
+                                    size_t align) {
   // Signal to OS that we want large pages for addresses
   // from addr, addr + bytes
   struct memcntl_mha mpss_struct;
@@ -3108,7 +3114,8 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
+                                 bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -3145,7 +3152,7 @@
 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
   size_t res;
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
   return res;
 }
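
RESTARTABLE is defined earlier in this file, outside the diff; its conventional shape is a loop that reissues the system call for as long as it fails with EINTR, roughly:

    // Paraphrase of the usual RESTARTABLE shape, not quoted from the patch.
    // OS_ERR is HotSpot's -1 error sentinel.
    #define RESTARTABLE(_cmd, _result) do {                   \
        do {                                                  \
          _result = _cmd;                                     \
        } while ((_result == OS_ERR) && (errno == EINTR));    \
      } while (false)
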
@@ -3173,14 +3180,14 @@
     static hrtime_t last_time = 0;
     hrtime_t diff = getTimeNanos() - last_time;
 
-    if (diff < DontYieldALotInterval * 1000000)
+    if (diff < DontYieldALotInterval * 1000000) {
       return true;
+    }
 
     last_time += diff;
 
     return false;
-  }
-  else {
+  } else {
     return false;
   }
 }
@@ -3240,9 +3247,9 @@
 
 // sched class attributes
 typedef struct {
-        int   schedPolicy;              // classID
-        int   maxPrio;
-        int   minPrio;
+  int   schedPolicy;              // classID
+  int   maxPrio;
+  int   minPrio;
 } SchedInfo;
 
 
@@ -3375,8 +3382,10 @@
     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
   } else {
     // No clue - punt
-    if (ThreadPriorityVerbose)
-      tty->print_cr("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("Unknown scheduling class: %s ... \n",
+                    ClassInfo.pc_clname);
+    }
     return EINVAL;      // no clue, punt
   }
 
@@ -3399,13 +3408,11 @@
 // Convert from the libthread "thr_setprio" scale to our current
 // lwp scheduling class scale.
 //
-static
-int     scale_to_lwp_priority (int rMin, int rMax, int x)
-{
+static int scale_to_lwp_priority(int rMin, int rMax, int x) {
   int v;
 
   if (x == 127) return rMax;            // avoid round-down
-    v = (((x*(rMax-rMin)))/128)+rMin;
+  v = (((x*(rMax-rMin)))/128)+rMin;
   return v;
 }
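
To see the mapping concretely: scale_to_lwp_priority() sends the libthread range 0..127 linearly onto [rMin, rMax]. With illustrative limits rMin = 0 and rMax = 59, x = 64 gives (64 * 59) / 128 + 0 = 29, roughly the midpoint, while x = 127 would give only 58 under integer division; that round-down is exactly why the function returns rMax outright for x == 127.
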
 
@@ -3428,8 +3435,9 @@
 
   // If something went wrong on init, don't change priorities.
   if (!priocntl_enable) {
-    if (ThreadPriorityVerbose)
+    if (ThreadPriorityVerbose) {
       tty->print_cr("Trying to set priority but init failed, ignoring");
+    }
     return EINVAL;
   }
 
@@ -3438,8 +3446,8 @@
   if (lwpid <= 0) {
     if (ThreadPriorityVerbose) {
       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
-                     INTPTR_FORMAT " to %d, lwpid not set",
-                     ThreadID, newPrio);
+                    INTPTR_FORMAT " to %d, lwpid not set",
+                    ThreadID, newPrio);
     }
     return 0;
   }
@@ -3472,7 +3480,7 @@
     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(iaLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
+                              ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3487,7 +3495,7 @@
     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(tsLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
+                              ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3501,7 +3509,7 @@
     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(fxLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
+                              ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3591,7 +3599,6 @@
 // Maximum priority and so on.  This will cause VM threads
 // to get unfair treatment against other Solaris processes
 // which do not explicitly alter their thread priorities.
-//
 
 int os::java_to_os_priority[CriticalPriority + 1] = {
   -99999,         // 0 Entry should never be used
@@ -3638,23 +3645,24 @@
 
   int lwp_status =
           set_lwp_class_and_priority(osthread->thread_id(),
-          osthread->lwp_id(),
-          newpri,
-          fxcritical ? fxLimits.schedPolicy : myClass,
-          !fxcritical);
+                                     osthread->lwp_id(),
+                                     newpri,
+                                     fxcritical ? fxLimits.schedPolicy : myClass,
+                                     !fxcritical);
   if (lwp_status != 0 && fxcritical) {
     // Try again, this time without changing the scheduling class
     newpri = java_MaxPriority_to_os_priority;
     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
-            osthread->lwp_id(),
-            newpri, myClass, false);
+                                            osthread->lwp_id(),
+                                            newpri, myClass, false);
   }
   status |= lwp_status;
   return (status == 0) ? OS_OK : OS_ERR;
 }
 
 
-OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
+OSReturn os::get_native_priority(const Thread* const thread,
+                                 int *priority_ptr) {
   int p;
   if (!UseThreadPriorities) {
     *priority_ptr = NormalPriority;
@@ -3843,12 +3851,12 @@
 }
 
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -3883,7 +3891,9 @@
 
 // This does not do anything on Solaris. This is basically a hook for being
 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
-void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
+void os::os_exception_wrapper(java_call_t f, JavaValue* value,
+                              methodHandle* method, JavaCallArguments* args,
+                              Thread* thread) {
   f(value, method, args, thread);
 }
 
@@ -3914,9 +3924,10 @@
 // Note that the VM will print warnings if it detects conflicting signal
 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
 //
-extern "C" JNIEXPORT int
-JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
-                          int abort_if_unrecognized);
+extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
+                                                   siginfo_t* siginfo,
+                                                   void* ucontext,
+                                                   int abort_if_unrecognized);
 
 
 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
@@ -3925,18 +3936,18 @@
   errno = orig_errno;
 }
 
-/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
-   is needed to provoke threads blocked on IO to return an EINTR
-   Note: this explicitly does NOT call JVM_handle_solaris_signal and
-   does NOT participate in signal chaining due to requirement for
-   NOT setting SA_RESTART to make EINTR work. */
+// Do not delete - if guarantee is ever removed, a signal handler (even empty)
+// is needed to provoke threads blocked on IO to return an EINTR.
+// Note: this explicitly does NOT call JVM_handle_solaris_signal and
+// does NOT participate in signal chaining, due to the requirement of
+// NOT setting SA_RESTART to make EINTR work.
 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
-   if (UseSignalChaining) {
-      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
-      if (actp && actp->sa_handler) {
-        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
-      }
-   }
+  if (UseSignalChaining) {
+    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
+    if (actp && actp->sa_handler) {
+      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
+    }
+  }
 }
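
sigINTRHandler exists only so that delivering the interrupt signal makes a blocked system call fail with EINTR, and as the comment stresses, that works only if the handler is installed without SA_RESTART (visible in the set_signal_handler() hunk further down). The same technique in standalone POSIX form, with illustrative names:

    // Illustrative only: install a no-op handler whose sole purpose is
    // to break threads out of blocking system calls.
    #include <signal.h>
    #include <cstring>

    static void noop_handler(int) { }  // delivery, not the body, is the point

    void install_interruptible(int sig) {
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_handler = noop_handler;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;                 // deliberately NOT SA_RESTART
      sigaction(sig, &sa, NULL);
    }

    // A read() blocked in some thread now fails with errno == EINTR when
    // that thread is sent 'sig' (e.g. via pthread_kill), which is how the
    // VM interrupts I/O.
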
 
 // This boolean allows users to forward their own non-matching signals
@@ -4021,27 +4032,31 @@
 }
 
 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
-  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
+  assert((chainedsigactions != (struct sigaction *)NULL) &&
+         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
   if (preinstalled_sigs[sig] != 0) {
     return &chainedsigactions[sig];
   }
   return NULL;
 }
 
-void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
-
+void os::Solaris::save_preinstalled_handler(int sig,
+                                            struct sigaction& oldAct) {
   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
-  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
+  assert((chainedsigactions != (struct sigaction *)NULL) &&
+         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
   chainedsigactions[sig] = oldAct;
   preinstalled_sigs[sig] = 1;
 }
 
-void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
+void os::Solaris::set_signal_handler(int sig, bool set_installed,
+                                     bool oktochain) {
   // Check for overwrite.
   struct sigaction oldAct;
   sigaction(sig, (struct sigaction*)NULL, &oldAct);
-  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
-                                      : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
+  void* oldhand =
+      oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
+                          : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
@@ -4072,9 +4087,9 @@
   // not using stack banging
   if (!UseStackBanging && sig == SIGSEGV) {
     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
-  // Interruptible i/o requires SA_RESTART cleared so EINTR
-  // is returned instead of restarting system calls
   } else if (sig == os::Solaris::SIGinterrupt()) {
+    // Interruptible i/o requires SA_RESTART cleared so EINTR
+    // is returned instead of restarting system calls
     sigemptyset(&sigAct.sa_mask);
     sigAct.sa_handler = NULL;
     sigAct.sa_flags = SA_SIGINFO;
@@ -4092,9 +4107,12 @@
 }
 
 
-#define DO_SIGNAL_CHECK(sig) \
-  if (!sigismember(&check_signal_done, sig)) \
-    os::Solaris::check_signal_handler(sig)
+#define DO_SIGNAL_CHECK(sig)                      \
+  do {                                            \
+    if (!sigismember(&check_signal_done, sig)) {  \
+      os::Solaris::check_signal_handler(sig);     \
+    }                                             \
+  } while (0)
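
Wrapping DO_SIGNAL_CHECK in do { ... } while (0) is the standard fix for multi-statement macros: the old expansion was a bare if, which captures a following else at the call site. A minimal, compilable demonstration of the hazard:

    #include <cstdio>

    // Old style: a bare if, dangerous inside another if/else.
    #define CHECK_BAD(x)  if ((x) > 0) printf("checking %d\n", (x))
    // New style: do/while (0) turns the macro into a single statement.
    #define CHECK_GOOD(x) do { if ((x) > 0) printf("checking %d\n", (x)); } while (0)

    int main(int argc, char**) {
      if (argc > 1)
        CHECK_BAD(argc);
      else
        printf("no args\n");   // captured by the macro's if: never runs!

      if (argc > 1)
        CHECK_GOOD(argc);
      else
        printf("no args\n");   // now bound to the outer if, as intended
      return 0;
    }
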
 
 // This method is a periodic task to check for misbehaving JNI applications
 // under CheckJNI, we can add any periodic checks here
@@ -4155,34 +4173,34 @@
 
 
   switch (sig) {
-    case SIGSEGV:
-    case SIGBUS:
-    case SIGFPE:
-    case SIGPIPE:
-    case SIGXFSZ:
-    case SIGILL:
+  case SIGSEGV:
+  case SIGBUS:
+  case SIGFPE:
+  case SIGPIPE:
+  case SIGXFSZ:
+  case SIGILL:
+    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
+    break;
+
+  case SHUTDOWN1_SIGNAL:
+  case SHUTDOWN2_SIGNAL:
+  case SHUTDOWN3_SIGNAL:
+  case BREAK_SIGNAL:
+    jvmHandler = (address)user_handler();
+    break;
+
+  default:
+    int intrsig = os::Solaris::SIGinterrupt();
+    int asynsig = os::Solaris::SIGasync();
+
+    if (sig == intrsig) {
+      jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
+    } else if (sig == asynsig) {
       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
-      break;
-
-    case SHUTDOWN1_SIGNAL:
-    case SHUTDOWN2_SIGNAL:
-    case SHUTDOWN3_SIGNAL:
-    case BREAK_SIGNAL:
-      jvmHandler = (address)user_handler();
-      break;
-
-    default:
-      int intrsig = os::Solaris::SIGinterrupt();
-      int asynsig = os::Solaris::SIGasync();
-
-      if (sig == intrsig) {
-        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
-      } else if (sig == asynsig) {
-        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
-      } else {
-        return;
-      }
-      break;
+    } else {
+      return;
+    }
+    break;
   }
 
 
@@ -4253,7 +4271,7 @@
     // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
     // can not register overridable signals which might be > 32
     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
-    // Tell libjsig jvm has finished setting signal handlers
+      // Tell libjsig jvm has finished setting signal handlers
       (*end_signal_setting)();
       libjsigdone = true;
     }
@@ -4288,7 +4306,8 @@
 }
 
 
-void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
+void report_error(const char* file_name, int line_no, const char* title,
+                  const char* format, ...);
 
 const char * signames[] = {
   "SIG0",
@@ -4306,9 +4325,9 @@
   if (0 < exception_code && exception_code <= SIGRTMAX) {
     // signal
     if (exception_code < sizeof(signames)/sizeof(const char*)) {
-       jio_snprintf(buf, size, "%s", signames[exception_code]);
+      jio_snprintf(buf, size, "%s", signames[exception_code]);
     } else {
-       jio_snprintf(buf, size, "SIG%d", exception_code);
+      jio_snprintf(buf, size, "SIG%d", exception_code);
     }
     return buf;
   } else {
@@ -4402,8 +4421,7 @@
     os::Solaris::set_cond_init(lwp_cond_init);
     os::Solaris::set_cond_destroy(lwp_cond_destroy);
     os::Solaris::set_cond_scope(USYNC_THREAD);
-  }
-  else {
+  } else {
     os::Solaris::set_mutex_scope(USYNC_THREAD);
     os::Solaris::set_cond_scope(USYNC_THREAD);
 
@@ -4420,8 +4438,7 @@
       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
       os::Solaris::set_cond_init(pthread_cond_default_init);
       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
-    }
-    else {
+    } else {
       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
@@ -4449,7 +4466,7 @@
     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
-                                       dlsym(handle, "lgrp_cookie_stale")));
+                                                      dlsym(handle, "lgrp_cookie_stale")));
 
     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
     set_lgrp_cookie(c);
@@ -4502,9 +4519,10 @@
   init_random(1234567);
 
   page_size = sysconf(_SC_PAGESIZE);
-  if (page_size == -1)
+  if (page_size == -1) {
     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                   strerror(errno)));
+  }
   init_page_sizes((size_t) page_size);
 
   Solaris::initialize_system_info();
@@ -4530,8 +4548,9 @@
   // and is available on linker patches for 5.7 and 5.8.
   // libdl.so must have been loaded, this call is just an entry lookup
   void * hdl = dlopen("libdl.so", RTLD_NOW);
-  if (hdl)
+  if (hdl) {
     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
+  }
 
   // (Solaris only) this switches to calls that actually do locking.
   ThreadCritical::initialize();
@@ -4579,8 +4598,10 @@
   os::set_polling_page(polling_page);
 
 #ifndef PRODUCT
-  if (Verbose && PrintMiscellaneous)
-    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
+  if (Verbose && PrintMiscellaneous) {
+    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
+               (intptr_t)polling_page);
+  }
 #endif
 
   if (!UseMembar) {
@@ -4589,8 +4610,10 @@
     os::set_memory_serialize_page(mem_serialize_page);
 
 #ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous)
-      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+    if (Verbose && PrintMiscellaneous) {
+      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
+                 (intptr_t)mem_serialize_page);
+    }
 #endif
   }
 
@@ -4600,12 +4623,12 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
+                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
+      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                   os::Solaris::min_stack_allowed/K);
     return JNI_ERR;
@@ -4619,17 +4642,17 @@
   // should be to fix the guard page mechanism.
 
   if (vm_page_size() > 8*K) {
-      threadStackSizeInBytes = (threadStackSizeInBytes != 0)
-         ? threadStackSizeInBytes +
-           ((StackYellowPages + StackRedPages) * vm_page_size())
-         : 0;
-      ThreadStackSize = threadStackSizeInBytes/K;
+    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
+       ? threadStackSizeInBytes +
+         ((StackYellowPages + StackRedPages) * vm_page_size())
+       : 0;
+    ThreadStackSize = threadStackSizeInBytes/K;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   Solaris::libthread_init();
 
@@ -4669,14 +4692,16 @@
     struct rlimit nbr_files;
     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
     if (status != 0) {
-      if (PrintMiscellaneous && (Verbose || WizardMode))
+      if (PrintMiscellaneous && (Verbose || WizardMode)) {
         perror("os::init_2 getrlimit failed");
+      }
     } else {
       nbr_files.rlim_cur = nbr_files.rlim_max;
       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
       if (status != 0) {
-        if (PrintMiscellaneous && (Verbose || WizardMode))
+        if (PrintMiscellaneous && (Verbose || WizardMode)) {
           perror("os::init_2 setrlimit failed");
+        }
       }
     }
   }
@@ -4728,15 +4753,17 @@
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
-  if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0)
+  if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
     fatal("Could not disable polling page");
-};
+  }
+}
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
-  if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0)
+  if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
     fatal("Could not enable polling page");
-};
+  }
+}
 
 // OS interface.
 
@@ -4749,13 +4776,15 @@
   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
     void* where = RTLD_NEXT;
-    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
-        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
+    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) {
+      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
+    }
    if (!sol_vsnprintf) {
      // search for the named symbol in the objects that were loaded before libjvm
       where = RTLD_DEFAULT;
-      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
+      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) {
         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
+      }
       assert(sol_vsnprintf != NULL, "vsnprintf not found");
     }
   }
@@ -4771,7 +4800,7 @@
   dir = opendir(path);
   if (dir == NULL) return true;
 
-  /* Scan the directory */
+  // Scan the directory
   bool result = true;
   char buf[sizeof(struct dirent) + MAX_PATH];
   struct dirent *dbuf = (struct dirent *) buf;
@@ -4788,7 +4817,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 #ifndef O_DELETE
-#define O_DELETE 0x10000
+  #define O_DELETE 0x10000
 #endif
 
 // Open a file. Unlink the file immediately after open returns
@@ -4807,7 +4836,7 @@
   fd = ::open64(path, oflag, mode);
   if (fd == -1) return -1;
 
-  //If the open succeeded, the file might still be a directory
+  // If the open succeeded, the file might still be a directory
   {
     struct stat64 buf64;
     int ret = ::fstat64(fd, &buf64);
@@ -4824,77 +4853,78 @@
       return -1;
     }
   }
-    /*
-     * 32-bit Solaris systems suffer from:
-     *
-     * - an historical default soft limit of 256 per-process file
-     *   descriptors that is too low for many Java programs.
-     *
-     * - a design flaw where file descriptors created using stdio
-     *   fopen must be less than 256, _even_ when the first limit above
-     *   has been raised.  This can cause calls to fopen (but not calls to
-     *   open, for example) to fail mysteriously, perhaps in 3rd party
-     *   native code (although the JDK itself uses fopen).  One can hardly
-     *   criticize them for using this most standard of all functions.
-     *
-     * We attempt to make everything work anyways by:
-     *
-     * - raising the soft limit on per-process file descriptors beyond
-     *   256
-     *
-     * - As of Solaris 10u4, we can request that Solaris raise the 256
-     *   stdio fopen limit by calling function enable_extended_FILE_stdio.
-     *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
-     *
-     * - If we are stuck on an old (pre 10u4) Solaris system, we can
-     *   workaround the bug by remapping non-stdio file descriptors below
-     *   256 to ones beyond 256, which is done below.
-     *
-     * See:
-     * 1085341: 32-bit stdio routines should support file descriptors >255
-     * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
-     * 6431278: Netbeans crash on 32 bit Solaris: need to call
-     *          enable_extended_FILE_stdio() in VM initialisation
-     * Giri Mandalika's blog
-     * http://technopark02.blogspot.com/2005_05_01_archive.html
-     */
+
+  // 32-bit Solaris systems suffer from:
+  //
+  // - an historical default soft limit of 256 per-process file
+  //   descriptors that is too low for many Java programs.
+  //
+  // - a design flaw where file descriptors created using stdio
+  //   fopen must be less than 256, _even_ when the first limit above
+  //   has been raised.  This can cause calls to fopen (but not calls to
+  //   open, for example) to fail mysteriously, perhaps in 3rd party
+  //   native code (although the JDK itself uses fopen).  One can hardly
+  //   criticize them for using this most standard of all functions.
+  //
+  // We attempt to make everything work anyway by:
+  //
+  // - raising the soft limit on per-process file descriptors beyond
+  //   256
+  //
+  // - As of Solaris 10u4, we can request that Solaris raise the 256
+  //   stdio fopen limit by calling function enable_extended_FILE_stdio.
+  //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
+  //
+  // - If we are stuck on an old (pre 10u4) Solaris system, we can
+  //   work around the bug by remapping non-stdio file descriptors below
+  //   256 to ones beyond 256, which is done below.
+  //
+  // See:
+  // 1085341: 32-bit stdio routines should support file descriptors >255
+  // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
+  // 6431278: Netbeans crash on 32 bit Solaris: need to call
+  //          enable_extended_FILE_stdio() in VM initialisation
+  // Giri Mandalika's blog
+  // http://technopark02.blogspot.com/2005_05_01_archive.html
+  //
 #ifndef  _LP64
-     if ((!enabled_extended_FILE_stdio) && fd < 256) {
-         int newfd = ::fcntl(fd, F_DUPFD, 256);
-         if (newfd != -1) {
-             ::close(fd);
-             fd = newfd;
-         }
-     }
+  if ((!enabled_extended_FILE_stdio) && fd < 256) {
+    int newfd = ::fcntl(fd, F_DUPFD, 256);
+    if (newfd != -1) {
+      ::close(fd);
+      fd = newfd;
+    }
+  }
 #endif // 32-bit Solaris
-    /*
-     * All file descriptors that are opened in the JVM and not
-     * specifically destined for a subprocess should have the
-     * close-on-exec flag set.  If we don't set it, then careless 3rd
-     * party native code might fork and exec without closing all
-     * appropriate file descriptors (e.g. as we do in closeDescriptors in
-     * UNIXProcess.c), and this in turn might:
-     *
-     * - cause end-of-file to fail to be detected on some file
-     *   descriptors, resulting in mysterious hangs, or
-     *
-     * - might cause an fopen in the subprocess to fail on a system
-     *   suffering from bug 1085341.
-     *
-     * (Yes, the default setting of the close-on-exec flag is a Unix
-     * design flaw)
-     *
-     * See:
-     * 1085341: 32-bit stdio routines should support file descriptors >255
-     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
-     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
-     */
+
+  // All file descriptors that are opened in the JVM and not
+  // specifically destined for a subprocess should have the
+  // close-on-exec flag set.  If we don't set it, then careless 3rd
+  // party native code might fork and exec without closing all
+  // appropriate file descriptors (e.g. as we do in closeDescriptors in
+  // UNIXProcess.c), and this in turn might:
+  //
+  // - cause end-of-file to fail to be detected on some file
+  //   descriptors, resulting in mysterious hangs, or
+  //
+  // - cause an fopen in the subprocess to fail on a system
+  //   suffering from bug 1085341.
+  //
+  // (Yes, the default setting of the close-on-exec flag is a Unix
+  // design flaw)
+  //
+  // See:
+  // 1085341: 32-bit stdio routines should support file descriptors >255
+  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
+  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
+  //
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1) {
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
     }
+  }
 #endif
 
   if (o_delete != 0) {
@@ -4940,7 +4970,7 @@
 
 int os::available(int fd, jlong *bytes) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   jlong cur, end;
   int mode;
   struct stat64 buf64;
@@ -4952,7 +4982,7 @@
 
       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
       if (ioctl_return>= 0) {
-          *bytes = n;
+        *bytes = n;
         return 1;
       }
     }
@@ -4970,8 +5000,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags;
 
@@ -5002,8 +5032,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -5032,7 +5062,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -5048,9 +5078,8 @@
 class RecordSynch {
   char* _name;
  public:
-  RecordSynch(char* name) :_name(name)
-                 { record_synch(_name, false); }
-  ~RecordSynch() { record_synch(_name,   true);  }
+  RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
+  ~RecordSynch()                       { record_synch(_name, true); }
 };
 
 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
@@ -5080,7 +5109,7 @@
   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
 
 #define CHECK_MUTEX(mutex_op) \
-CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
+  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
 
 CHECK_MUTEX(   mutex_lock)
 CHECK_MUTEX(  _mutex_lock)
@@ -5090,14 +5119,14 @@
 CHECK_MUTEX(_mutex_trylock)
 
 #define CHECK_COND(cond_op) \
-CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
+  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
 
 CHECK_COND( cond_wait);
 CHECK_COND(_cond_wait);
 CHECK_COND(_cond_wait_cancel);
 
 #define CHECK_COND2(cond_op) \
-CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
+  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
 
 CHECK_COND2( cond_timedwait);
 CHECK_COND2(_cond_timedwait);
@@ -5221,16 +5250,16 @@
   int fd;
 
   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
-                     getpid(),
-                     thread->osthread()->lwp_id());
+          getpid(),
+          thread->osthread()->lwp_id());
   fd = ::open(proc_name, O_RDONLY);
   if (fd == -1) return -1;
 
   do {
     count = ::pread(fd,
-                  (void *)&prusage.pr_utime,
-                  thr_time_size,
-                  thr_time_off);
+                    (void *)&prusage.pr_utime,
+                    thr_time_size,
+                    thr_time_off);
   } while (count < 0 && errno == EINTR);
   ::close(fd);
   if (count < 0) return -1;
@@ -5288,10 +5317,11 @@
     st->print(PTR_FORMAT ": ", addr);
     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
-    } else if (dlinfo.dli_fbase != NULL)
+    } else if (dlinfo.dli_fbase != NULL) {
       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
-    else
+    } else {
       st->print("<absolute address>");
+    }
     if (dlinfo.dli_fname != NULL) {
       st->print(" in %s", dlinfo.dli_fname);
     }
@@ -5309,8 +5339,9 @@
       if (begin < lowest)  begin = lowest;
       Dl_info dlinfo2;
       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
-          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
+          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
         end = (address) dlinfo2.dli_saddr;
+      }
       Disassembler::decode(begin, end, st);
     }
     return true;
@@ -5426,15 +5457,15 @@
     // leave it alone rather than always rounding down.
 
     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
-       // It appears that when we go directly through Solaris _lwp_cond_timedwait()
-           // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
-           max_wait_period = 21000000;
+    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
+    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
+    max_wait_period = 21000000;
   } else {
     max_wait_period = 50000000;
   }
   millis %= 1000;
   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
-     seconds = max_wait_period;
+    seconds = max_wait_period;
   }
   abstime->tv_sec = now.tv_sec  + seconds;
   long       usec = now.tv_usec + millis * 1000;
@@ -5453,34 +5484,34 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     // See http://monaco.sfbay/detail.jsf?cr=5094058.
-     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
-     // Only for SPARC >= V8PlusA
+    // Do this the hard way by blocking ...
+    // See http://monaco.sfbay/detail.jsf?cr=5094058.
+    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
+    // Only for SPARC >= V8PlusA
 #if defined(__sparc) && defined(COMPILER2)
-     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
+    if (ClearFPUAtPark) { _mark_fpu_nosave(); }
 #endif
-     int status = os::Solaris::mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        // With usr/lib/lwp going to kernel, always handle ETIME
-        status = os::Solaris::cond_wait(_cond, _mutex);
-        if (status == ETIME) status = EINTR;
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
-     _Event = 0;
-     status = os::Solaris::mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    int status = os::Solaris::mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      // With usr/lib/lwp going to kernel, always handle ETIME
+      status = os::Solaris::cond_wait(_cond, _mutex);
+      if (status == ETIME) status = EINTR;
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
+    _Event = 0;
+    status = os::Solaris::mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
@@ -5491,8 +5522,8 @@
   guarantee(_nParked == 0, "invariant");
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -5505,20 +5536,20 @@
   // For Solaris SPARC set fprs.FEF=0 prior to parking.
   // Only for SPARC >= V8PlusA
 #if defined(__sparc) && defined(COMPILER2)
- if (ClearFPUAtPark) { _mark_fpu_nosave(); }
+  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
 #endif
   int status = os::Solaris::mutex_lock(_mutex);
   assert_status(status == 0, status, "mutex_lock");
   guarantee(_nParked == 0, "invariant");
   ++_nParked;
   while (_Event < 0) {
-     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
-     assert_status(status == 0 || status == EINTR ||
-                   status == ETIME || status == ETIMEDOUT,
-                   status, "cond_timedwait");
-     if (!FilterSpuriousWakeups) break;                // previous semantics
-     if (status == ETIME || status == ETIMEDOUT) break;
-     // We consume and ignore EINTR and spurious wakeups.
+    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
+    assert_status(status == 0 || status == EINTR ||
+                  status == ETIME || status == ETIMEDOUT,
+                  status, "cond_timedwait");
+    if (!FilterSpuriousWakeups) break;                // previous semantics
+    if (status == ETIME || status == ETIMEDOUT) break;
+    // We consume and ignore EINTR and spurious wakeups.
   }
   --_nParked;
   if (_Event >= 0) ret = OS_OK;
@@ -5567,36 +5598,34 @@
 // JSR166
 // -------------------------------------------------------
 
-/*
- * The solaris and linux implementations of park/unpark are fairly
- * conservative for now, but can be improved. They currently use a
- * mutex/condvar pair, plus _counter.
- * Park decrements _counter if > 0, else does a condvar wait.  Unpark
- * sets count to 1 and signals condvar.  Only one thread ever waits
- * on the condvar. Contention seen when trying to park implies that someone
- * is unparking you, so don't wait. And spurious returns are fine, so there
- * is no need to track notifications.
- */
+// The Solaris and Linux implementations of park/unpark are fairly
+// conservative for now, but can be improved. They currently use a
+// mutex/condvar pair, plus _counter.
+// Park decrements _counter if > 0, else does a condvar wait.  Unpark
+// sets count to 1 and signals condvar.  Only one thread ever waits
+// on the condvar. Contention seen when trying to park implies that someone
+// is unparking you, so don't wait. And spurious returns are fine, so there
+// is no need to track notifications.
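
A condensed sketch of the scheme this comment describes, using plain pthreads; the real Parker carries more state plus the timed path served by unpackTime() below:

    #include <pthread.h>

    class ParkerSketch {
      int _counter;                  // 0 = no permit, 1 = permit available
      pthread_mutex_t _mutex;
      pthread_cond_t  _cond;
     public:
      ParkerSketch() : _counter(0) {
        pthread_mutex_init(&_mutex, NULL);
        pthread_cond_init(&_cond, NULL);
      }
      ~ParkerSketch() {
        pthread_cond_destroy(&_cond);
        pthread_mutex_destroy(&_mutex);
      }
      void park() {
        // Contention here means someone is unparking us: don't wait.
        if (pthread_mutex_trylock(&_mutex) != 0) return;
        if (_counter == 0) {
          // Spurious returns are fine, so no re-check loop is needed.
          pthread_cond_wait(&_cond, &_mutex);
        }
        _counter = 0;                // consume the permit
        pthread_mutex_unlock(&_mutex);
      }
      void unpark() {
        pthread_mutex_lock(&_mutex);
        _counter = 1;                // permits never accumulate past one
        pthread_mutex_unlock(&_mutex);
        pthread_cond_signal(&_cond);
      }
    };
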
 
 #define MAX_SECS 100000000
-/*
- * This code is common to linux and solaris and will be moved to a
- * common place in dolphin.
- *
- * The passed in time value is either a relative time in nanoseconds
- * or an absolute time in milliseconds. Either way it has to be unpacked
- * into suitable seconds and nanoseconds components and stored in the
- * given timespec structure.
- * Given time is a 64-bit value and the time_t used in the timespec is only
- * a signed-32-bit value (except on 64-bit Linux) we have to watch for
- * overflow if times way in the future are given. Further on Solaris versions
- * prior to 10 there is a restriction (see cond_timedwait) that the specified
- * number of seconds, in abstime, is less than current_time  + 100,000,000.
- * As it will be 28 years before "now + 100000000" will overflow we can
- * ignore overflow and just impose a hard-limit on seconds using the value
- * of "now + 100,000,000". This places a limit on the timeout of about 3.17
- * years from "now".
- */
+
+// This code is common to Linux and Solaris and will be moved to a
+// common place in dolphin.
+//
+// The passed in time value is either a relative time in nanoseconds
+// or an absolute time in milliseconds. Either way it has to be unpacked
+// into suitable seconds and nanoseconds components and stored in the
+// given timespec structure.
+// Given time is a 64-bit value and the time_t used in the timespec is only
+// a signed-32-bit value (except on 64-bit Linux), we have to watch for
+// overflow if times way in the future are given. Further, on Solaris versions
+// prior to 10 there is a restriction (see cond_timedwait) that the specified
+// number of seconds, in abstime, is less than current_time + 100,000,000.
+// As it will be 28 years before "now + 100000000" will overflow, we can
+// ignore overflow and just impose a hard-limit on seconds using the value
+// of "now + 100,000,000". This places a limit on the timeout of about 3.17
+// years from "now".
+//
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert(time > 0, "convertTime");
 
@@ -5610,19 +5639,16 @@
     jlong secs = time / 1000;
     if (secs > max_secs) {
       absTime->tv_sec = max_secs;
-    }
-    else {
+    } else {
       absTime->tv_sec = secs;
     }
     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
@@ -5799,26 +5825,26 @@
     // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
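
The 0x80 + WTERMSIG(status) convention mirrors what Unix shells leave in $?: a child killed by SIGKILL (signal 9) is reported as 0x80 + 9 = 137, one killed by SIGSEGV (signal 11) as 139, so a caller can tell "died on signal 9" apart from an ordinary small exit code just as a shell user would.
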
@@ -5832,43 +5858,49 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt.so";
-    const char *new_xawtstr = "/libawt_xawt.so";
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *new_xawtstr = "/libawt_xawt.so";
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) {
+    return false;
+  } else {
+    *p = '\0';
+  }
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 }
 
 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
   size_t res;
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
   return res;
 }
@@ -5883,13 +5915,13 @@
 
 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
 }
 
 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
 }
 
@@ -5912,7 +5944,7 @@
   pfd.events = POLLIN;
 
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
 
   gettimeofday(&t, &aNull);
   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
@@ -5920,14 +5952,15 @@
   for (;;) {
     res = ::poll(&pfd, 1, timeout);
     if (res == OS_ERR && errno == EINTR) {
-        if (timeout != -1) {
-          gettimeofday(&t, &aNull);
-          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
-          timeout -= newtime - prevtime;
-          if (timeout <= 0)
-            return OS_OK;
-          prevtime = newtime;
+      if (timeout != -1) {
+        gettimeofday(&t, &aNull);
+        newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
+        timeout -= newtime - prevtime;
+        if (timeout <= 0) {
+          return OS_OK;
         }
+        prevtime = newtime;
+      }
     } else return res;
   }
 }
@@ -5956,41 +5989,41 @@
   //
   //     EISCONN          The socket is already connected.
   if (_result == OS_ERR && errno == EINTR) {
-     /* restarting a connect() changes its errno semantics */
-     RESTARTABLE(::connect(fd, him, len), _result);
-     /* undo these changes */
-     if (_result == OS_ERR) {
-       if (errno == EALREADY) {
-         errno = EINPROGRESS; /* fall through */
-       } else if (errno == EISCONN) {
-         errno = 0;
-         return OS_OK;
-       }
-     }
-   }
-   return _result;
- }
+    // restarting a connect() changes its errno semantics
+    RESTARTABLE(::connect(fd, him, len), _result);
+    // undo these changes
+    if (_result == OS_ERR) {
+      if (errno == EALREADY) {
+        errno = EINPROGRESS; // fall through
+      } else if (errno == EISCONN) {
+        errno = 0;
+        return OS_OK;
+      }
+    }
+  }
+  return _result;
+}
 
 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
   if (fd < 0) {
     return OS_ERR;
   }
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
 }
 
 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                  sockaddr* from, socklen_t* fromlen) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
 }
 
 int os::sendto(int fd, char* buf, size_t len, uint flags,
                struct sockaddr* to, socklen_t tolen) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
 }
 
@@ -6007,8 +6040,8 @@
 
 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
-   return ::bind(fd, him, len);
+         "Assumed _thread_in_native");
+  return ::bind(fd, him, len);
 }
 
 // Get the default path to the core file
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -36,10 +36,10 @@
  private:
 
   // Support for "new" libthread APIs for getting & setting thread context (2.8)
-  #define TRS_VALID       0
-  #define TRS_NONVOLATILE 1
-  #define TRS_LWPID       2
-  #define TRS_INVALID     3
+#define TRS_VALID       0
+#define TRS_NONVOLATILE 1
+#define TRS_LWPID       2
+#define TRS_INVALID     3
 
  // initialized to libthread or lwp synchronization primitives depending on UseLWPSynchronization
   static int_fnP_mutex_tP _mutex_lock;
@@ -61,8 +61,8 @@
   typedef id_t            lgrp_id_t;
   typedef int             lgrp_rsrc_t;
   typedef enum lgrp_view {
-        LGRP_VIEW_CALLER,       /* what's available to the caller */
-        LGRP_VIEW_OS            /* what's available to operating system */
+    LGRP_VIEW_CALLER,       // what's available to the caller
+    LGRP_VIEW_OS            // what's available to the operating system
   } lgrp_view_t;
 
   typedef uint_t (*getisax_func_t)(uint32_t* array, uint_t n);
@@ -74,8 +74,8 @@
   typedef int (*lgrp_children_func_t)(lgrp_cookie_t  cookie,  lgrp_id_t  parent,
                                       lgrp_id_t *lgrp_array, uint_t lgrp_array_size);
   typedef int (*lgrp_resources_func_t)(lgrp_cookie_t  cookie,  lgrp_id_t  lgrp,
-                                      lgrp_id_t *lgrp_array, uint_t lgrp_array_size,
-                                      lgrp_rsrc_t type);
+                                       lgrp_id_t *lgrp_array, uint_t lgrp_array_size,
+                                       lgrp_rsrc_t type);
   typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie);
   typedef int (*lgrp_cookie_stale_func_t)(lgrp_cookie_t cookie);
   typedef int (*meminfo_func_t)(const uint64_t inaddr[],   int addr_count,
@@ -128,7 +128,7 @@
   static bool valid_stack_address(Thread* thread, address sp);
   static bool valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect);
   static ucontext_t* get_valid_uc_in_signal_handler(Thread* thread,
-    ucontext_t* uc);
+                                                    ucontext_t* uc);
 
   static ExtendedPC  ucontext_get_ExtendedPC(ucontext_t* uc);
   static intptr_t*   ucontext_get_sp(ucontext_t* uc);
@@ -143,7 +143,7 @@
   // os_solaris_i486.hpp and os_solaris_sparc.hpp, but that file
   // provides extensions to the os class and not the Solaris class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   static void hotspot_sigmask(Thread* thread);
 
@@ -216,8 +216,7 @@
   static void set_mutex_destroy(int_fnP_mutex_tP func)   { _mutex_destroy = func; }
   static void set_mutex_scope(int scope)                 { _mutex_scope = scope; }
 
-  static int cond_timedwait(cond_t *cv, mutex_t *mx, timestruc_t *abst)
-                                                { return _cond_timedwait(cv, mx, abst); }
+  static int cond_timedwait(cond_t *cv, mutex_t *mx, timestruc_t *abst) { return _cond_timedwait(cv, mx, abst); }
   static int cond_wait(cond_t *cv, mutex_t *mx) { return _cond_wait(cv, mx); }
   static int cond_signal(cond_t *cv)            { return _cond_signal(cv); }
   static int cond_broadcast(cond_t *cv)         { return _cond_broadcast(cv); }
@@ -225,8 +224,7 @@
   static int cond_destroy(cond_t *cv)           { return _cond_destroy(cv); }
   static int cond_scope()                       { return _cond_scope; }
 
-  static void set_cond_timedwait(int_fnP_cond_tP_mutex_tP_timestruc_tP func)
-                                                           { _cond_timedwait = func; }
+  static void set_cond_timedwait(int_fnP_cond_tP_mutex_tP_timestruc_tP func) { _cond_timedwait = func; }
   static void set_cond_wait(int_fnP_cond_tP_mutex_tP func) { _cond_wait = func; }
   static void set_cond_signal(int_fnP_cond_tP func)        { _cond_signal = func; }
   static void set_cond_broadcast(int_fnP_cond_tP func)     { _cond_broadcast = func; }
@@ -247,9 +245,9 @@
   static id_t lgrp_home(idtype_t type, id_t id)      { return _lgrp_home != NULL ? _lgrp_home(type, id) : -1; }
   static lgrp_cookie_t lgrp_init(lgrp_view_t view)   { return _lgrp_init != NULL ? _lgrp_init(view) : 0; }
   static int lgrp_fini(lgrp_cookie_t cookie)         { return _lgrp_fini != NULL ? _lgrp_fini(cookie) : -1; }
-  static lgrp_id_t lgrp_root(lgrp_cookie_t cookie)   { return _lgrp_root != NULL ? _lgrp_root(cookie) : -1; };
+  static lgrp_id_t lgrp_root(lgrp_cookie_t cookie)   { return _lgrp_root != NULL ? _lgrp_root(cookie) : -1; }
   static int lgrp_children(lgrp_cookie_t  cookie,  lgrp_id_t  parent,
-                    lgrp_id_t *lgrp_array, uint_t lgrp_array_size) {
+                           lgrp_id_t *lgrp_array, uint_t lgrp_array_size) {
     return _lgrp_children != NULL ? _lgrp_children(cookie, parent, lgrp_array, lgrp_array_size) : -1;
   }
   static int lgrp_resources(lgrp_cookie_t  cookie,  lgrp_id_t  lgrp,
@@ -269,8 +267,8 @@
 
   static void set_meminfo(meminfo_func_t func)       { _meminfo = func; }
   static int meminfo (const uint64_t inaddr[],   int addr_count,
-                     const uint_t  info_req[],  int info_count,
-                     uint64_t  outdata[], uint_t validity[]) {
+                      const uint_t  info_req[],  int info_count,
+                      uint64_t  outdata[], uint_t validity[]) {
     return _meminfo != NULL ? _meminfo(inaddr, addr_count, info_req, info_count,
                                        outdata, validity) : -1;
   }
@@ -300,57 +298,57 @@
 };
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    int _nParked;
-    int _pipev[2];
-    mutex_t _mutex[1];
-    cond_t  _cond[1];
-    double PostPad[2];
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  int _nParked;
+  int _pipev[2];
+  mutex_t _mutex[1];
+  cond_t  _cond[1];
+  double PostPad[2];
 
-  protected:
-    // Defining a protected ctor effectively gives us an abstract base class.
-    // That is, a PlatformEvent can never be instantiated "naked" but only
-    // as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
-    // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
-    PlatformEvent() {
-      int status;
-      status = os::Solaris::cond_init(_cond);
-      assert_status(status == 0, status, "cond_init");
-      status = os::Solaris::mutex_init(_mutex);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _pipev[0] = _pipev[1] = -1;
-    }
+ protected:
+  // Defining a protected ctor effectively gives us an abstract base class.
+  // That is, a PlatformEvent can never be instantiated "naked" but only
+  // as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
+  // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
+  PlatformEvent() {
+    int status;
+    status = os::Solaris::cond_init(_cond);
+    assert_status(status == 0, status, "cond_init");
+    status = os::Solaris::mutex_init(_mutex);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _pipev[0] = _pipev[1] = -1;
+  }
 
-  public:
-    // Exercise caution using reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    int  park(jlong millis);
-    void unpark();
+ public:
+  // Exercise caution using reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  int  park(jlong millis);
+  void unpark();
 };
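
The protected-constructor comment above names a standard C++ idiom for an "effectively abstract" base class without pure virtuals; a tiny illustration with hypothetical names:

    class Base {
     protected:
      Base() {}                // constructible only from a derived class
    };
    class Derived : public Base {};

    // Base b;                 // error: Base::Base() is protected
    Derived d;                 // OK: Derived's ctor may invoke Base's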
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    mutex_t _mutex[1];
-    cond_t  _cond[1];
+ protected:
+  mutex_t _mutex[1];
+  cond_t  _cond[1];
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = os::Solaris::cond_init(_cond);
-      assert_status(status == 0, status, "cond_init");
-      status = os::Solaris::mutex_init(_mutex);
-      assert_status(status == 0, status, "mutex_init");
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = os::Solaris::cond_init(_cond);
+    assert_status(status == 0, status, "cond_init");
+    status = os::Solaris::mutex_init(_mutex);
+    assert_status(status == 0, status, "mutex_init");
+  }
 };
 
 #endif // OS_SOLARIS_VM_OS_SOLARIS_HPP
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -92,7 +92,7 @@
 #include <io.h>
 #include <process.h>              // For _beginthreadex(), _endthreadex()
 #include <imagehlp.h>             // For os::dll_address_to_function_name
-/* for enumerating dll libraries */
+// for enumerating dll libraries
 #include <vdmdbg.h>
 
 // for timer info max values which include all bits
@@ -113,11 +113,11 @@
 static FILETIME process_kernel_time;
 
 #ifdef _M_IA64
-#define __CPU__ ia64
+  #define __CPU__ ia64
 #elif _M_AMD64
-#define __CPU__ amd64
+  #define __CPU__ amd64
 #else
-#define __CPU__ i486
+  #define __CPU__ i486
 #endif
 
 // save DLL module handle, used by GetModuleFileName
@@ -126,18 +126,19 @@
 
 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
   switch (reason) {
-    case DLL_PROCESS_ATTACH:
-      vm_lib_handle = hinst;
-      if (ForceTimeHighResolution)
-        timeBeginPeriod(1L);
-      break;
-    case DLL_PROCESS_DETACH:
-      if (ForceTimeHighResolution)
-        timeEndPeriod(1L);
-
-      break;
-    default:
-      break;
+  case DLL_PROCESS_ATTACH:
+    vm_lib_handle = hinst;
+    if (ForceTimeHighResolution) {
+      timeBeginPeriod(1L);
+    }
+    break;
+  case DLL_PROCESS_DETACH:
+    if (ForceTimeHighResolution) {
+      timeEndPeriod(1L);
+    }
+    break;
+  default:
+    break;
   }
   return true;
 }
@@ -153,8 +154,8 @@
 // Implementation of os
 
 bool os::getenv(const char* name, char* buffer, int len) {
- int result = GetEnvironmentVariable(name, buffer, len);
- return result > 0 && result < len;
+  int result = GetEnvironmentVariable(name, buffer, len);
+  return result > 0 && result < len;
 }
 
 bool os::unsetenv(const char* name) {
@@ -179,67 +180,72 @@
 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
+
 void os::init_system_properties_values() {
-  /* sysclasspath, java_home, dll_dir */
+  // sysclasspath, java_home, dll_dir
   {
-      char *home_path;
-      char *dll_path;
-      char *pslash;
-      char *bin = "\\bin";
-      char home_dir[MAX_PATH];
-
-      if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
-          os::jvm_path(home_dir, sizeof(home_dir));
-          // Found the full path to jvm.dll.
-          // Now cut the path to <java_home>/jre if we can.
-          *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
-          pslash = strrchr(home_dir, '\\');
-          if (pslash != NULL) {
-              *pslash = '\0';                 /* get rid of \{client|server} */
-              pslash = strrchr(home_dir, '\\');
-              if (pslash != NULL)
-                  *pslash = '\0';             /* get rid of \bin */
-          }
+    char *home_path;
+    char *dll_path;
+    char *pslash;
+    char *bin = "\\bin";
+    char home_dir[MAX_PATH];
+
+    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
+      os::jvm_path(home_dir, sizeof(home_dir));
+      // Found the full path to jvm.dll.
+      // Now cut the path to <java_home>/jre if we can.
+      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
+      pslash = strrchr(home_dir, '\\');
+      if (pslash != NULL) {
+        *pslash = '\0';                   // get rid of \{client|server}
+        pslash = strrchr(home_dir, '\\');
+        if (pslash != NULL) {
+          *pslash = '\0';                 // get rid of \bin
+        }
       }
-
-      home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
-      if (home_path == NULL)
-          return;
-      strcpy(home_path, home_dir);
-      Arguments::set_java_home(home_path);
-
-      dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
-      if (dll_path == NULL)
-          return;
-      strcpy(dll_path, home_dir);
-      strcat(dll_path, bin);
-      Arguments::set_dll_dir(dll_path);
-
-      if (!set_boot_path('\\', ';'))
-          return;
-  }
-
-  /* library_path */
-  #define EXT_DIR "\\lib\\ext"
-  #define BIN_DIR "\\bin"
-  #define PACKAGE_DIR "\\Sun\\Java"
+    }
+
+    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
+    if (home_path == NULL) {
+      return;
+    }
+    strcpy(home_path, home_dir);
+    Arguments::set_java_home(home_path);
+
+    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
+                                mtInternal);
+    if (dll_path == NULL) {
+      return;
+    }
+    strcpy(dll_path, home_dir);
+    strcat(dll_path, bin);
+    Arguments::set_dll_dir(dll_path);
+
+    if (!set_boot_path('\\', ';')) {
+      return;
+    }
+  }
+
+// library_path
+#define EXT_DIR "\\lib\\ext"
+#define BIN_DIR "\\bin"
+#define PACKAGE_DIR "\\Sun\\Java"
   {
-    /* Win32 library search order (See the documentation for LoadLibrary):
-     *
-     * 1. The directory from which application is loaded.
-     * 2. The system wide Java Extensions directory (Java only)
-     * 3. System directory (GetSystemDirectory)
-     * 4. Windows directory (GetWindowsDirectory)
-     * 5. The PATH environment variable
-     * 6. The current directory
-     */
+    // Win32 library search order (See the documentation for LoadLibrary):
+    //
+    // 1. The directory from which application is loaded.
+    // 2. The system wide Java Extensions directory (Java only)
+    // 3. System directory (GetSystemDirectory)
+    // 4. Windows directory (GetWindowsDirectory)
+    // 5. The PATH environment variable
+    // 6. The current directory
 
     char *library_path;
     char tmp[MAX_PATH];
     char *path_str = ::getenv("PATH");
 
     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
-        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
+                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 
     library_path[0] = '\0';
 
@@ -261,8 +267,8 @@
     strcat(library_path, tmp);
 
     if (path_str) {
-        strcat(library_path, ";");
-        strcat(library_path, path_str);
+      strcat(library_path, ";");
+      strcat(library_path, path_str);
     }
 
     strcat(library_path, ";.");
@@ -271,27 +277,27 @@
     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
   }
 
-  /* Default extensions directory */
+  // Default extensions directory
   {
     char path[MAX_PATH];
     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
     GetWindowsDirectory(path, MAX_PATH);
     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
-        path, PACKAGE_DIR, EXT_DIR);
+            path, PACKAGE_DIR, EXT_DIR);
     Arguments::set_ext_dirs(buf);
   }
   #undef EXT_DIR
   #undef BIN_DIR
   #undef PACKAGE_DIR
 
-  /* Default endorsed standards directory. */
+  // Default endorsed standards directory.
   {
-    #define ENDORSED_DIR "\\lib\\endorsed"
+#define ENDORSED_DIR "\\lib\\endorsed"
     size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
     char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
     sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
     Arguments::set_endorsed_dirs(buf);
-    #undef ENDORSED_DIR
+#undef ENDORSED_DIR
   }
 
 #ifndef _WIN64
@@ -312,17 +318,16 @@
   os::breakpoint();
 }
 
-/*
- * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
- * So far, this method is only used by Native Memory Tracking, which is
- * only supported on Windows XP or later.
- */
+// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
+// So far, this method is only used by Native Memory Tracking, which is
+// only supported on Windows XP or later.
+//
 int os::get_native_stack(address* stack, int frames, int toSkip) {
 #ifdef _NMT_NOINLINE_
-  toSkip ++;
+  toSkip++;
 #endif
   int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
-    (PVOID*)stack, NULL);
+                                                       (PVOID*)stack, NULL);
   for (int index = captured; index < frames; index ++) {
     stack[index] = NULL;
   }
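
Outside the Kernel32Dll indirection used above, the same API is reachable directly as CaptureStackBackTrace on OS versions that provide it; a hedged sketch of a direct call:

    #include <windows.h>
    #include <stdio.h>

    void print_native_stack() {
      void* frames[32];
      // Skip this frame; capture up to 32 return addresses (hash unused).
      USHORT n = CaptureStackBackTrace(1, 32, frames, NULL);
      for (USHORT i = 0; i < n; i++) {
        printf("frame %u: %p\n", (unsigned)i, frames[i]);
      }
    }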
@@ -347,13 +352,13 @@
 
   // Add up the sizes of all the regions with the same
   // AllocationBase.
-  while (1)
-  {
+  while (1) {
     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
-    if (stack_bottom == (address)minfo.AllocationBase)
+    if (stack_bottom == (address)minfo.AllocationBase) {
       stack_size += minfo.RegionSize;
-    else
+    } else {
       break;
+    }
   }
 
 #ifdef _M_IA64
@@ -440,10 +445,10 @@
   // by VM, so VM can generate error dump when an exception occurred in non-
   // Java thread (e.g. VM thread).
   __try {
-     thread->run();
+    thread->run();
   } __except(topLevelExceptionFilter(
-             (_EXCEPTION_POINTERS*)_exception_info())) {
-      // Nothing to do.
+                                     (_EXCEPTION_POINTERS*)_exception_info())) {
+    // Nothing to do.
   }
 
   // One less thread is executing
@@ -458,7 +463,8 @@
   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 }
 
-static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
+static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
+                                  int thread_id) {
   // Allocate the OSThread object
   OSThread* osthread = new OSThread(NULL, NULL);
   if (osthread == NULL) return NULL;
@@ -501,7 +507,7 @@
   OSThread* osthread = create_os_thread(thread, thread_h,
                                         (int)current_thread_id());
   if (osthread == NULL) {
-     return false;
+    return false;
   }
 
   // Initial thread state is RUNNABLE
@@ -517,9 +523,9 @@
 #endif
   if (_starting_thread == NULL) {
     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
-     if (_starting_thread == NULL) {
-        return false;
-     }
+    if (_starting_thread == NULL) {
+      return false;
+    }
   }
 
   // The primordial thread is runnable from the start
@@ -530,7 +536,8 @@
 }
 
 // Allocate and initialize a new OSThread
-bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+bool os::create_thread(Thread* thread, ThreadType thr_type,
+                       size_t stack_size) {
   unsigned thread_id;
 
   // Allocate the OSThread object
@@ -554,8 +561,9 @@
     switch (thr_type) {
     case os::java_thread:
       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
-      if (JavaThread::stack_size_at_create() > 0)
+      if (JavaThread::stack_size_at_create() > 0) {
         stack_size = JavaThread::stack_size_at_create();
+      }
       break;
     case os::compiler_thread:
       if (CompilerThreadStackSize > 0) {
@@ -594,7 +602,7 @@
   // flag appears to work with _beginthreadex() as well.
 
 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
-#define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
+  #define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
 #endif
 
   HANDLE thread_handle =
@@ -608,12 +616,12 @@
     // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
     // without the flag.
     thread_handle =
-    (HANDLE)_beginthreadex(NULL,
-                           (unsigned)stack_size,
-                           (unsigned (__stdcall *)(void*)) java_start,
-                           thread,
-                           CREATE_SUSPENDED,
-                           &thread_id);
+      (HANDLE)_beginthreadex(NULL,
+                             (unsigned)stack_size,
+                             (unsigned (__stdcall *)(void*)) java_start,
+                             thread,
+                             CREATE_SUSPENDED,
+                             &thread_id);
   }
   if (thread_handle == NULL) {
     // Need to clean up stuff we've allocated so far
@@ -675,8 +683,8 @@
   if (win32::_has_performance_count) {
     return performance_frequency;
   } else {
-   // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
-   return 10000000;
+    // FILETIME is the number of 100-nanosecond intervals since January 1, 1601.
+    return 10000000;
   }
 }
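
Since a FILETIME counts 100-nanosecond ticks from January 1, 1601, converting one is plain 64-bit arithmetic; a common sketch (the 1601-to-1970 offset is the well-known constant, stated here as an assumption rather than taken from this file):

    // Combine the two 32-bit halves into a single 64-bit tick count.
    static jlong filetime_to_ticks(const FILETIME& ft) {
      return ((jlong)ft.dwHighDateTime << 32) | (jlong)ft.dwLowDateTime;
    }
    // 100 ns ticks between 1601-01-01 and 1970-01-01.
    static const jlong EPOCH_1601_TO_1970 = 116444736000000000LL;
    // Microseconds since the Unix epoch for a FILETIME 'ft' would then be:
    //   (filetime_to_ticks(ft) - EPOCH_1601_TO_1970) / 10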
 
@@ -908,15 +916,15 @@
 }
 
 bool os::getTimesSecs(double* process_real_time,
-                     double* process_user_time,
-                     double* process_system_time) {
+                      double* process_user_time,
+                      double* process_system_time) {
   HANDLE h_process = GetCurrentProcess();
   FILETIME create_time, exit_time, kernel_time, user_time;
   BOOL result = GetProcessTimes(h_process,
-                               &create_time,
-                               &exit_time,
-                               &kernel_time,
-                               &user_time);
+                                &create_time,
+                                &exit_time,
+                                &kernel_time,
+                                &user_time);
   if (result != 0) {
     FILETIME wt;
     GetSystemTimeAsFileTime(&wt);
@@ -933,7 +941,6 @@
 }
 
 void os::shutdown() {
-
   // allow PerfMemory to attempt cleanup of any persistent resources
   perfMemory_exit();
 
@@ -948,8 +955,10 @@
 }
 
 
-static BOOL  (WINAPI *_MiniDumpWriteDump)  ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
-                                            PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);
+static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
+                                         PMINIDUMP_EXCEPTION_INFORMATION,
+                                         PMINIDUMP_USER_STREAM_INFORMATION,
+                                         PMINIDUMP_CALLBACK_INFORMATION);
 
 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
   HINSTANCE dbghelp;
@@ -988,10 +997,13 @@
     return;
   }
 
-  _MiniDumpWriteDump = CAST_TO_FN_PTR(
-    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
-    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
-    GetProcAddress(dbghelp, "MiniDumpWriteDump"));
+  _MiniDumpWriteDump =
+      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
+                                    PMINIDUMP_EXCEPTION_INFORMATION,
+                                    PMINIDUMP_USER_STREAM_INFORMATION,
+                                    PMINIDUMP_CALLBACK_INFORMATION),
+                                    GetProcAddress(dbghelp,
+                                    "MiniDumpWriteDump"));
 
   if (_MiniDumpWriteDump == NULL) {
     VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
@@ -1004,7 +1016,7 @@
 // API_VERSION_NUMBER 11 or higher contains the ones we want though
 #if API_VERSION_NUMBER >= 11
   dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
-    MiniDumpWithUnloadedModules);
+                             MiniDumpWithUnloadedModules);
 #endif
 
   cwd = get_current_directory(NULL, 0);
@@ -1031,21 +1043,21 @@
   // the dump types we really want. If the first call fails, let's fall back to just using MiniDumpWithFullMemory.
   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
-        DWORD error = GetLastError();
-        LPTSTR msgbuf = NULL;
-
-        if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+    DWORD error = GetLastError();
+    LPTSTR msgbuf = NULL;
+
+    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_FROM_SYSTEM |
                       FORMAT_MESSAGE_IGNORE_INSERTS,
                       NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
 
-          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
-          LocalFree(msgbuf);
-        } else {
-          // Call to FormatMessage failed, just include the result from GetLastError
-          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
-        }
-        VMError::report_coredump_status(buffer, false);
+      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
+      LocalFree(msgbuf);
+    } else {
+      // Call to FormatMessage failed, just include the result from GetLastError
+      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
+    }
+    VMError::report_coredump_status(buffer, false);
   } else {
     VMError::report_coredump_status(buffer, true);
   }
@@ -1070,126 +1082,118 @@
 //
 // The declarations for DIR and struct dirent are in jvm_win32.h.
 
-/* Caller must have already run dirname through JVM_NativePath, which removes
-   duplicate slashes and converts all instances of '/' into '\\'. */
-
-DIR *
-os::opendir(const char *dirname)
-{
-    assert(dirname != NULL, "just checking");   // hotspot change
-    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
-    DWORD fattr;                                // hotspot change
-    char alt_dirname[4] = { 0, 0, 0, 0 };
-
-    if (dirp == 0) {
-        errno = ENOMEM;
-        return 0;
-    }
-
-    /*
-     * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
-     * as a directory in FindFirstFile().  We detect this case here and
-     * prepend the current drive name.
-     */
-    if (dirname[1] == '\0' && dirname[0] == '\\') {
-        alt_dirname[0] = _getdrive() + 'A' - 1;
-        alt_dirname[1] = ':';
-        alt_dirname[2] = '\\';
-        alt_dirname[3] = '\0';
-        dirname = alt_dirname;
-    }
-
-    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
-    if (dirp->path == 0) {
-        free(dirp, mtInternal);
-        errno = ENOMEM;
-        return 0;
-    }
-    strcpy(dirp->path, dirname);
-
-    fattr = GetFileAttributes(dirp->path);
-    if (fattr == 0xffffffff) {
-        free(dirp->path, mtInternal);
-        free(dirp, mtInternal);
-        errno = ENOENT;
-        return 0;
-    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
-        free(dirp->path, mtInternal);
-        free(dirp, mtInternal);
-        errno = ENOTDIR;
-        return 0;
-    }
-
-    /* Append "*.*", or possibly "\\*.*", to path */
-    if (dirp->path[1] == ':'
-        && (dirp->path[2] == '\0'
-            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
-        /* No '\\' needed for cases like "Z:" or "Z:\" */
-        strcat(dirp->path, "*.*");
-    } else {
-        strcat(dirp->path, "\\*.*");
-    }
-
-    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
-    if (dirp->handle == INVALID_HANDLE_VALUE) {
-        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
-            free(dirp->path, mtInternal);
-            free(dirp, mtInternal);
-            errno = EACCES;
-            return 0;
-        }
-    }
-    return dirp;
-}
-
-/* parameter dbuf unused on Windows */
-
-struct dirent *
-os::readdir(DIR *dirp, dirent *dbuf)
-{
-    assert(dirp != NULL, "just checking");      // hotspot change
-    if (dirp->handle == INVALID_HANDLE_VALUE) {
-        return 0;
-    }
-
-    strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
-
-    if (!FindNextFile(dirp->handle, &dirp->find_data)) {
-        if (GetLastError() == ERROR_INVALID_HANDLE) {
-            errno = EBADF;
-            return 0;
-        }
-        FindClose(dirp->handle);
-        dirp->handle = INVALID_HANDLE_VALUE;
-    }
-
-    return &dirp->dirent;
-}
-
-int
-os::closedir(DIR *dirp)
-{
-    assert(dirp != NULL, "just checking");      // hotspot change
-    if (dirp->handle != INVALID_HANDLE_VALUE) {
-        if (!FindClose(dirp->handle)) {
-            errno = EBADF;
-            return -1;
-        }
-        dirp->handle = INVALID_HANDLE_VALUE;
-    }
+// Caller must have already run dirname through JVM_NativePath, which removes
+// duplicate slashes and converts all instances of '/' into '\\'.
+
+DIR * os::opendir(const char *dirname) {
+  assert(dirname != NULL, "just checking");   // hotspot change
+  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
+  DWORD fattr;                                // hotspot change
+  char alt_dirname[4] = { 0, 0, 0, 0 };
+
+  if (dirp == 0) {
+    errno = ENOMEM;
+    return 0;
+  }
+
+  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
+  // as a directory in FindFirstFile().  We detect this case here and
+  // prepend the current drive name.
+  //
+  if (dirname[1] == '\0' && dirname[0] == '\\') {
+    alt_dirname[0] = _getdrive() + 'A' - 1;
+    alt_dirname[1] = ':';
+    alt_dirname[2] = '\\';
+    alt_dirname[3] = '\0';
+    dirname = alt_dirname;
+  }
+
+  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
+  if (dirp->path == 0) {
+    free(dirp, mtInternal);
+    errno = ENOMEM;
+    return 0;
+  }
+  strcpy(dirp->path, dirname);
+
+  fattr = GetFileAttributes(dirp->path);
+  if (fattr == 0xffffffff) {
+    free(dirp->path, mtInternal);
+    free(dirp, mtInternal);
+    errno = ENOENT;
+    return 0;
+  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
     free(dirp->path, mtInternal);
     free(dirp, mtInternal);
+    errno = ENOTDIR;
     return 0;
+  }
+
+  // Append "*.*", or possibly "\\*.*", to path
+  if (dirp->path[1] == ':' &&
+      (dirp->path[2] == '\0' ||
+      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
+    // No '\\' needed for cases like "Z:" or "Z:\"
+    strcat(dirp->path, "*.*");
+  } else {
+    strcat(dirp->path, "\\*.*");
+  }
+
+  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
+  if (dirp->handle == INVALID_HANDLE_VALUE) {
+    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
+      free(dirp->path, mtInternal);
+      free(dirp, mtInternal);
+      errno = EACCES;
+      return 0;
+    }
+  }
+  return dirp;
+}
+
+// parameter dbuf unused on Windows
+struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
+  assert(dirp != NULL, "just checking");      // hotspot change
+  if (dirp->handle == INVALID_HANDLE_VALUE) {
+    return 0;
+  }
+
+  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
+
+  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
+    if (GetLastError() == ERROR_INVALID_HANDLE) {
+      errno = EBADF;
+      return 0;
+    }
+    FindClose(dirp->handle);
+    dirp->handle = INVALID_HANDLE_VALUE;
+  }
+
+  return &dirp->dirent;
+}
+
+int os::closedir(DIR *dirp) {
+  assert(dirp != NULL, "just checking");      // hotspot change
+  if (dirp->handle != INVALID_HANDLE_VALUE) {
+    if (!FindClose(dirp->handle)) {
+      errno = EBADF;
+      return -1;
+    }
+    dirp->handle = INVALID_HANDLE_VALUE;
+  }
+  free(dirp->path, mtInternal);
+  free(dirp, mtInternal);
+  return 0;
 }
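
Together, the three functions above emulate the POSIX directory API on top of FindFirstFile/FindNextFile; a hedged usage sketch (path hypothetical, and per the comment above it must already have gone through JVM_NativePath):

    dirent scratch;  // the dbuf parameter is unused on Windows, see readdir()
    DIR* dir = os::opendir("C:\\some\\dir");
    if (dir != NULL) {
      for (struct dirent* e = os::readdir(dir, &scratch); e != NULL;
           e = os::readdir(dir, &scratch)) {
        // e->d_name holds the entry name ("." and ".." included)
      }
      os::closedir(dir);
    }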
 
 // This must be hard coded because it's the system's temporary
 // directory, not the java application's temp directory, a la java.io.tmpdir.
 const char* os::get_temp_directory() {
   static char path_buf[MAX_PATH];
-  if (GetTempPath(MAX_PATH, path_buf)>0)
+  if (GetTempPath(MAX_PATH, path_buf) > 0) {
     return path_buf;
-  else{
-    path_buf[0]='\0';
+  } else {
+    path_buf[0] = '\0';
     return path_buf;
   }
 }
@@ -1272,47 +1276,48 @@
 // Helper routine which returns true if the address is
 // within the NTDLL address space.
 //
-static bool _addr_in_ntdll( address addr )
-{
+static bool _addr_in_ntdll(address addr) {
   HMODULE hmod;
   MODULEINFO minfo;
 
   hmod = GetModuleHandle("NTDLL.DLL");
   if (hmod == NULL) return false;
-  if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
-                               &minfo, sizeof(MODULEINFO)) )
+  if (!os::PSApiDll::GetModuleInformation(GetCurrentProcess(), hmod,
+                                          &minfo, sizeof(MODULEINFO))) {
     return false;
+  }
 
   if ((addr >= minfo.lpBaseOfDll) &&
-       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
+      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
     return true;
-  else
+  } else {
     return false;
+  }
 }
 #endif
 
 struct _modinfo {
-   address addr;
-   char*   full_path;   // point to a char buffer
-   int     buflen;      // size of the buffer
-   address base_addr;
+  address addr;
+  char*   full_path;   // point to a char buffer
+  int     buflen;      // size of the buffer
+  address base_addr;
 };
 
 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                   address top_address, void * param) {
-   struct _modinfo *pmod = (struct _modinfo *)param;
-   if (!pmod) return -1;
-
-   if (base_addr   <= pmod->addr &&
-       top_address > pmod->addr) {
-     // if a buffer is provided, copy path name to the buffer
-     if (pmod->full_path) {
-       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
-     }
-     pmod->base_addr = base_addr;
-     return 1;
-   }
-   return 0;
+  struct _modinfo *pmod = (struct _modinfo *)param;
+  if (!pmod) return -1;
+
+  if (base_addr   <= pmod->addr &&
+      top_address > pmod->addr) {
+    // if a buffer is provided, copy path name to the buffer
+    if (pmod->full_path) {
+      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
+    }
+    pmod->base_addr = base_addr;
+    return 1;
+  }
+  return 0;
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
@@ -1355,16 +1360,16 @@
 
 // save the start and end address of jvm.dll into param[0] and param[1]
 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
-                    address top_address, void * param) {
-   if (!param) return -1;
-
-   if (base_addr   <= (address)_locate_jvm_dll &&
-       top_address > (address)_locate_jvm_dll) {
-         ((address*)param)[0] = base_addr;
-         ((address*)param)[1] = top_address;
-         return 1;
-   }
-   return 0;
+                           address top_address, void * param) {
+  if (!param) return -1;
+
+  if (base_addr   <= (address)_locate_jvm_dll &&
+      top_address > (address)_locate_jvm_dll) {
+    ((address*)param)[0] = base_addr;
+    ((address*)param)[1] = top_address;
+    return 1;
+  }
+  return 0;
 }
 
 address vm_lib_location[2];    // start and end address of jvm.dll
@@ -1384,29 +1389,27 @@
 // print module info; param is outputStream*
 static int _print_module(const char* fname, address base_address,
                          address top_address, void* param) {
-   if (!param) return -1;
-
-   outputStream* st = (outputStream*)param;
-
-   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
-   return 0;
+  if (!param) return -1;
+
+  outputStream* st = (outputStream*)param;
+
+  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
+  return 0;
 }
 
 // Loads a .dll/.so and, in case of error, checks whether the
 // .dll/.so was built for the same architecture as Hotspot is
 // running on.
-void * os::dll_load(const char *name, char *ebuf, int ebuflen)
-{
+void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   void * result = LoadLibrary(name);
-  if (result != NULL)
-  {
+  if (result != NULL) {
     return result;
   }
 
   DWORD errcode = GetLastError();
   if (errcode == ERROR_MOD_NOT_FOUND) {
-    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
-    ebuf[ebuflen-1]='\0';
+    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
+    ebuf[ebuflen - 1] = '\0';
     return NULL;
   }
 
@@ -1419,107 +1422,99 @@
   // Read system error message into ebuf
   // It may or may not be overwritten below (in the for loop and just above)
   lasterror(ebuf, (size_t) ebuflen);
-  ebuf[ebuflen-1]='\0';
-  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
-  if (file_descriptor<0)
-  {
+  ebuf[ebuflen - 1] = '\0';
+  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
+  if (fd < 0) {
     return NULL;
   }
 
   uint32_t signature_offset;
-  uint16_t lib_arch=0;
-  bool failed_to_get_lib_arch=
-  (
-    //Go to position 3c in the dll
-    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
-    ||
-    // Read loacation of signature
-    (sizeof(signature_offset)!=
-      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
-    ||
-    //Go to COFF File Header in dll
-    //that is located after"signature" (4 bytes long)
-    (os::seek_to_file_offset(file_descriptor,
-      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
-    ||
-    //Read field that contains code of architecture
-    // that dll was build for
-    (sizeof(lib_arch)!=
-      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
-  );
-
-  ::close(file_descriptor);
-  if (failed_to_get_lib_arch)
-  {
+  uint16_t lib_arch = 0;
+  bool failed_to_get_lib_arch =
+    ( // Go to position 3c in the dll
+     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
+     ||
+     // Read location of signature
+     (sizeof(signature_offset) !=
+     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
+     ||
+     // Go to COFF File Header in dll
+     // that is located after "signature" (4 bytes long)
+     (os::seek_to_file_offset(fd,
+     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
+     ||
+     // Read field that contains code of architecture
+     // that dll was built for
+     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
+    );
+
+  ::close(fd);
+  if (failed_to_get_lib_arch) {
     // file i/o error - report os::lasterror(...) msg
     return NULL;
   }
 
-  typedef struct
-  {
+  typedef struct {
     uint16_t arch_code;
     char* arch_name;
   } arch_t;
 
-  static const arch_t arch_array[]={
+  static const arch_t arch_array[] = {
     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
   };
-  #if   (defined _M_IA64)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
-  #elif (defined _M_AMD64)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
-  #elif (defined _M_IX86)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
-  #else
-    #error Method os::dll_load requires that one of following \
-           is defined :_M_IA64,_M_AMD64 or _M_IX86
-  #endif
+#if   (defined _M_IA64)
+  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
+#elif (defined _M_AMD64)
+  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
+#elif (defined _M_IX86)
+  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
+#else
+  #error Method os::dll_load requires that one of following \
+         is defined :_M_IA64,_M_AMD64 or _M_IX86
+#endif
 
 
   // Obtain strings for the printf operation below:
   // lib_arch_str shall name the platform this .dll was built for,
   // running_arch_str the platform Hotspot was built for.
-  char *running_arch_str=NULL,*lib_arch_str=NULL;
-  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
-  {
-    if (lib_arch==arch_array[i].arch_code)
-      lib_arch_str=arch_array[i].arch_name;
-    if (running_arch==arch_array[i].arch_code)
-      running_arch_str=arch_array[i].arch_name;
+  char *running_arch_str = NULL, *lib_arch_str = NULL;
+  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
+    if (lib_arch == arch_array[i].arch_code) {
+      lib_arch_str = arch_array[i].arch_name;
+    }
+    if (running_arch == arch_array[i].arch_code) {
+      running_arch_str = arch_array[i].arch_name;
+    }
   }
 
   assert(running_arch_str,
-    "Didn't find runing architecture code in arch_array");
-
-  // If the architure is right
+         "Didn't find running architecture code in arch_array");
+
+  // If the architecture is right
   // but some other error took place - report os::lasterror(...) msg
-  if (lib_arch == running_arch)
-  {
+  if (lib_arch == running_arch) {
     return NULL;
   }
 
-  if (lib_arch_str!=NULL)
-  {
-    ::_snprintf(ebuf, ebuflen-1,
-      "Can't load %s-bit .dll on a %s-bit platform",
-      lib_arch_str,running_arch_str);
-  }
-  else
-  {
+  if (lib_arch_str != NULL) {
+    ::_snprintf(ebuf, ebuflen - 1,
+                "Can't load %s-bit .dll on a %s-bit platform",
+                lib_arch_str, running_arch_str);
+  } else {
+    // don't know what architecture this dll was built for
-    ::_snprintf(ebuf, ebuflen-1,
-      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
-      lib_arch,running_arch_str);
+    ::_snprintf(ebuf, ebuflen - 1,
+                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
+                lib_arch, running_arch_str);
   }
 
   return NULL;
 }
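
The failed_to_get_lib_arch chain above is a manual PE header walk: the 32-bit value at file offset 0x3c locates the "PE\0\0" signature, and the 16-bit COFF Machine field follows it directly. A freestanding sketch of the same probe (constants written out, error handling elided):

    #include <stdio.h>
    #include <stdint.h>

    // Sketch: return the COFF Machine code of a PE file (0 on failure).
    static uint16_t pe_machine(const char* path) {
      uint32_t sig_off = 0;
      uint16_t machine = 0;
      FILE* f = fopen(path, "rb");
      if (f == NULL) return 0;
      fseek(f, 0x3c, SEEK_SET);                // e_lfanew: signature offset
      fread(&sig_off, sizeof(sig_off), 1, f);
      fseek(f, sig_off + 4, SEEK_SET);         // skip the 4-byte "PE\0\0"
      fread(&machine, sizeof(machine), 1, f);  // first COFF field: Machine
      fclose(f);
      return machine;  // e.g. 0x014c = i386, 0x8664 = AMD64
    }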
 
 void os::print_dll_info(outputStream *st) {
-   st->print_cr("Dynamic libraries:");
-   get_loaded_modules_info(_print_module, (void *)st);
+  st->print_cr("Dynamic libraries:");
+  get_loaded_modules_info(_print_module, (void *)st);
 }
 
 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
@@ -1541,9 +1536,9 @@
 
   DWORD size_needed;
   if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
-                           sizeof(modules), &size_needed)) {
-      CloseHandle(hProcess);
-      return 0;
+                                        sizeof(modules), &size_needed)) {
+    CloseHandle(hProcess);
+    return 0;
   }
 
   // number of modules that are currently loaded
@@ -1552,20 +1547,20 @@
   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
     // Get Full pathname:
     if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
-                             filename, sizeof(filename))) {
-        filename[0] = '\0';
+                                           filename, sizeof(filename))) {
+      filename[0] = '\0';
     }
 
     MODULEINFO modinfo;
     if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
-                               &modinfo, sizeof(modinfo))) {
-        modinfo.lpBaseOfDll = NULL;
-        modinfo.SizeOfImage = 0;
+                                            &modinfo, sizeof(modinfo))) {
+      modinfo.lpBaseOfDll = NULL;
+      modinfo.SizeOfImage = 0;
     }
 
     // Invoke callback function
     result = callback(filename, (address)modinfo.lpBaseOfDll,
-                  (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
+                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
     if (result) break;
   }
 
@@ -1716,13 +1711,13 @@
 
   if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
       er->NumberParameters >= 2) {
-      switch (er->ExceptionInformation[0]) {
-      case 0: st->print(", reading address"); break;
-      case 1: st->print(", writing address"); break;
-      default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
-                            er->ExceptionInformation[0]);
-      }
-      st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
+    switch (er->ExceptionInformation[0]) {
+    case 0: st->print(", reading address"); break;
+    case 1: st->print(", writing address"); break;
+    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
+                       er->ExceptionInformation[0]);
+    }
+    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
   } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
              er->NumberParameters >= 2 && UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
@@ -1772,7 +1767,6 @@
     char* java_home_var = ::getenv("JAVA_HOME");
     if (java_home_var != NULL && java_home_var[0] != 0 &&
         strlen(java_home_var) < (size_t)buflen) {
-
       strncpy(buf, java_home_var, buflen);
 
       // determine if this is a legacy image or modules image
@@ -1817,13 +1811,13 @@
   if ((errval = GetLastError()) != 0) {
     // DOS error
     size_t n = (size_t)FormatMessage(
-          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
-          NULL,
-          errval,
-          0,
-          buf,
-          (DWORD)len,
-          NULL);
+                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
+                                     NULL,
+                                     errval,
+                                     0,
+                                     buf,
+                                     (DWORD)len,
+                                     NULL);
     if (n > 3) {
       // Drop final '.', CR, LF
       if (buf[n - 1] == '\n') n--;
@@ -1849,8 +1843,9 @@
 
 int os::get_last_error() {
   DWORD error = GetLastError();
-  if (error == 0)
+  if (error == 0) {
     error = errno;
+  }
   return (int)error;
 }
 
@@ -1892,52 +1887,50 @@
 //
 static BOOL WINAPI consoleHandler(DWORD event) {
   switch (event) {
-    case CTRL_C_EVENT:
-      if (is_error_reported()) {
-        // Ctrl-C is pressed during error reporting, likely because the error
-        // handler fails to abort. Let VM die immediately.
-        os::die();
-      }
-
-      os::signal_raise(SIGINT);
-      return TRUE;
-      break;
-    case CTRL_BREAK_EVENT:
-      if (sigbreakHandler != NULL) {
-        (*sigbreakHandler)(SIGBREAK);
-      }
-      return TRUE;
-      break;
-    case CTRL_LOGOFF_EVENT: {
-      // Don't terminate JVM if it is running in a non-interactive session,
-      // such as a service process.
-      USEROBJECTFLAGS flags;
-      HANDLE handle = GetProcessWindowStation();
-      if (handle != NULL &&
-          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
-            sizeof(USEROBJECTFLAGS), NULL)) {
-        // If it is a non-interactive session, let next handler to deal
-        // with it.
-        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
-          return FALSE;
-        }
+  case CTRL_C_EVENT:
+    if (is_error_reported()) {
+      // Ctrl-C is pressed during error reporting, likely because the error
+      // handler fails to abort. Let VM die immediately.
+      os::die();
+    }
+
+    os::signal_raise(SIGINT);
+    return TRUE;
+    break;
+  case CTRL_BREAK_EVENT:
+    if (sigbreakHandler != NULL) {
+      (*sigbreakHandler)(SIGBREAK);
+    }
+    return TRUE;
+    break;
+  case CTRL_LOGOFF_EVENT: {
+    // Don't terminate JVM if it is running in a non-interactive session,
+    // such as a service process.
+    USEROBJECTFLAGS flags;
+    HANDLE handle = GetProcessWindowStation();
+    if (handle != NULL &&
+        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
+        sizeof(USEROBJECTFLAGS), NULL)) {
+      // If it is a non-interactive session, let the next handler deal
+      // with it.
+      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
+        return FALSE;
       }
     }
-    case CTRL_CLOSE_EVENT:
-    case CTRL_SHUTDOWN_EVENT:
-      os::signal_raise(SIGTERM);
-      return TRUE;
-      break;
-    default:
-      break;
+  }
+  case CTRL_CLOSE_EVENT:
+  case CTRL_SHUTDOWN_EVENT:
+    os::signal_raise(SIGTERM);
+    return TRUE;
+    break;
+  default:
+    break;
   }
   return FALSE;
 }
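
consoleHandler() has the HandlerRoutine shape expected by SetConsoleCtrlHandler, and is presumably registered elsewhere in this file; a minimal sketch of such a registration:

    // Sketch: install the handler; TRUE adds it to the handler chain.
    if (!SetConsoleCtrlHandler(consoleHandler, TRUE)) {
      // GetLastError() describes the failure; real code would warn here.
    }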
 
-/*
- * The following code is moved from os.cpp for making this
- * code platform specific, which it is by its very nature.
- */
+// The following code is moved from os.cpp for making this
+// code platform specific, which it is by its very nature.
 
 // Return maximum OS signal used + 1 for internal use only
 // Used as exit signal for signal_thread
@@ -2011,12 +2004,10 @@
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
       if (threadIsSuspended) {
-        //
         // The semaphore has been incremented, but while we were waiting
         // another thread suspended us. We don't want to continue running
         // while suspended because that would surprise the thread that
         // suspended us.
-        //
         ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
         assert(ret != 0, "ReleaseSemaphore() failed");
 
@@ -2036,7 +2027,8 @@
 
 // Implicit OS exception handling
 
-LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
+LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
+                      address handler) {
   JavaThread* thread = JavaThread::current();
   // Save pc in thread
 #ifdef _M_IA64
@@ -2094,7 +2086,7 @@
 
 // Handle NAT Bit consumption on IA64.
 #ifdef _M_IA64
-#define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
+  #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
 #endif
 
 // Windows Vista/2008 heap corruption check
@@ -2152,8 +2144,8 @@
 const char* os::exception_name(int exception_code, char *buf, size_t size) {
   for (int i = 0; exceptlabels[i].name != NULL; i++) {
     if (exceptlabels[i].number == exception_code) {
-       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
-       return buf;
+      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
+      return buf;
     }
   }
 
@@ -2200,21 +2192,21 @@
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 
   switch (exception_code) {
-    case EXCEPTION_FLT_DENORMAL_OPERAND:
-    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
-    case EXCEPTION_FLT_INEXACT_RESULT:
-    case EXCEPTION_FLT_INVALID_OPERATION:
-    case EXCEPTION_FLT_OVERFLOW:
-    case EXCEPTION_FLT_STACK_CHECK:
-    case EXCEPTION_FLT_UNDERFLOW:
-      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
-      if (fp_control_word != ctx->FloatSave.ControlWord) {
-        // Restore FPCW and mask out FLT exceptions
-        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
-        // Mask out pending FLT exceptions
-        ctx->FloatSave.StatusWord &=  0xffffff00;
-        return EXCEPTION_CONTINUE_EXECUTION;
-      }
+  case EXCEPTION_FLT_DENORMAL_OPERAND:
+  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+  case EXCEPTION_FLT_INEXACT_RESULT:
+  case EXCEPTION_FLT_INVALID_OPERATION:
+  case EXCEPTION_FLT_OVERFLOW:
+  case EXCEPTION_FLT_STACK_CHECK:
+  case EXCEPTION_FLT_UNDERFLOW:
+    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
+    if (fp_control_word != ctx->FloatSave.ControlWord) {
+      // Restore FPCW and mask out FLT exceptions
+      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
+      // Mask out pending FLT exceptions
+      ctx->FloatSave.StatusWord &=  0xffffff00;
+      return EXCEPTION_CONTINUE_EXECUTION;
+    }
   }
 
   if (prev_uef_handler != NULL) {
@@ -2223,17 +2215,16 @@
     return (prev_uef_handler)(exceptionInfo);
   }
 #else // !_WIN64
-/*
-  On Windows, the mxcsr control bits are non-volatile across calls
-  See also CR 6192333
-  */
-      jint MxCsr = INITIAL_MXCSR;
-        // we can't use StubRoutines::addr_mxcsr_std()
-        // because in Win64 mxcsr is not saved there
-      if (MxCsr != ctx->MxCsr) {
-        ctx->MxCsr = MxCsr;
-        return EXCEPTION_CONTINUE_EXECUTION;
-      }
+  // On Windows, the mxcsr control bits are non-volatile across calls
+  // See also CR 6192333
+  //
+  jint MxCsr = INITIAL_MXCSR;
+  // we can't use StubRoutines::addr_mxcsr_std()
+  // because in Win64 mxcsr is not saved there
+  if (MxCsr != ctx->MxCsr) {
+    ctx->MxCsr = MxCsr;
+    return EXCEPTION_CONTINUE_EXECUTION;
+  }
 #endif // !_WIN64
 
   return EXCEPTION_CONTINUE_SEARCH;
@@ -2419,7 +2410,7 @@
           thread->enable_register_stack_red_zone();
 
           return Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
 #endif
         if (thread->stack_yellow_zone_enabled()) {
@@ -2428,9 +2419,9 @@
           // update the enabled status, even if the zone contains only one page.
           thread->disable_stack_yellow_zone();
           // If not in java code, return and hope for the best.
-          return in_java ? Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
-            :  EXCEPTION_CONTINUE_EXECUTION;
+          return in_java
+              ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
+              :  EXCEPTION_CONTINUE_EXECUTION;
         } else {
           // Fatal red zone violation.
           thread->disable_stack_red_zone();
@@ -2444,7 +2435,7 @@
         // a one-time-only guard page, which it has released to us.  The next
         // stack overflow on this thread will result in an ACCESS_VIOLATION.
         return Handle_Exception(exceptionInfo,
-          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
       } else {
         // Can only return and hope for the best.  Further stack growth will
         // result in an ACCESS_VIOLATION.
@@ -2459,15 +2450,13 @@
         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
           // Stack overflow.
           assert(!os::uses_stack_guard_pages(),
-            "should be caught by red zone code above.");
+                 "should be caught by red zone code above.");
           return Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
-        //
         // Check for safepoint polling and implicit null
         // We only expect null pointers in the stubs (vtable)
         // the rest are checked explicitly now.
-        //
         CodeBlob* cb = CodeCache::find_blob(pc);
         if (cb != NULL) {
           if (os::is_poll_address(addr)) {
@@ -2477,19 +2466,17 @@
         }
         {
 #ifdef _WIN64
-          //
           // If it's a legal stack address, map the entire region in
           //
           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
           address addr = (address) exceptionRecord->ExceptionInformation[1];
           if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
-                  addr = (address)((uintptr_t)addr &
-                         (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
-                  os::commit_memory((char *)addr, thread->stack_base() - addr,
-                                    !ExecMem);
-                  return EXCEPTION_CONTINUE_EXECUTION;
-          }
-          else
+            addr = (address)((uintptr_t)addr &
+                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
+            os::commit_memory((char *)addr, thread->stack_base() - addr,
+                              !ExecMem);
+            return EXCEPTION_CONTINUE_EXECUTION;
+          } else
 #endif
           {
             // Null pointer exception.
@@ -2509,7 +2496,7 @@
                                 *(bundle_start + 1), *bundle_start);
                 }
                 return Handle_Exception(exceptionInfo,
-                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
+                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
               }
             }
 
@@ -2563,7 +2550,7 @@
       // 1. must be first instruction in bundle
       // 2. must be a break instruction with appropriate code
       if ((((uint64_t) pc & 0x0F) == 0) &&
-         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
+          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
         return Handle_Exception(exceptionInfo,
                                 (address)SharedRuntime::get_handle_wrong_method_stub());
       }
@@ -2582,9 +2569,8 @@
       } // switch
     }
     if (((thread->thread_state() == _thread_in_Java) ||
-        (thread->thread_state() == _thread_in_native)) &&
-        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
-    {
+         (thread->thread_state() == _thread_in_native)) &&
+         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
       LONG result=Handle_FLT_Exception(exceptionInfo);
       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
     }
@@ -2615,14 +2601,19 @@
   return EXCEPTION_CONTINUE_SEARCH;
 }
 
-#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
-Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
-  __try { \
-    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
-  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
-  } \
-  return 0; \
-}
+#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
+  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
+                                                     jobject obj,           \
+                                                     jfieldID fieldID) {    \
+    __try {                                                                 \
+      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
+                                                                 obj,       \
+                                                                 fieldID);  \
+    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
+                                              _exception_info())) {         \
+    }                                                                       \
+    return 0;                                                               \
+  }
 
 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
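
For readers unfamiliar with MSVC structured exception handling, here is a minimal sketch (not from the changeset) of the shape the reformatted macro expands to; guarded_call, my_filter and risky are placeholder names.

  #include <windows.h>

  // The filter decides whether the __except block handles the OS exception.
  static LONG my_filter(_EXCEPTION_POINTERS* info) {
    return EXCEPTION_EXECUTE_HANDLER;
  }

  static int guarded_call(int (*risky)(void)) {
    __try {
      return risky();
    } __except (my_filter((_EXCEPTION_POINTERS*)_exception_info())) {
      return 0;  // fall back to a default value if an OS exception was raised
    }
  }
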
@@ -2635,15 +2626,15 @@
 
 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
   switch (type) {
-    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
-    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
-    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
-    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
-    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
-    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
-    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
-    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
-    default:        ShouldNotReachHere();
+  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
+  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
+  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
+  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
+  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
+  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
+  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
+  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
+  default:        ShouldNotReachHere();
   }
   return (address)-1;
 }
@@ -2655,7 +2646,7 @@
   __try {
     (*funcPtr)();
   } __except(topLevelExceptionFilter(
-             (_EXCEPTION_POINTERS*)_exception_info())) {
+                                     (_EXCEPTION_POINTERS*)_exception_info())) {
     // Nothing to do.
   }
 }
@@ -2686,7 +2677,7 @@
 // in the future, if so the code below needs to be revisited.
 
 #ifndef MEM_LARGE_PAGES
-#define MEM_LARGE_PAGES 0x20000000
+  #define MEM_LARGE_PAGES 0x20000000
 #endif
 
 static HANDLE    _hProcess;
@@ -2694,7 +2685,7 @@
 
 // Container for NUMA node list info
 class NUMANodeListHolder {
-private:
+ private:
   int *_numa_used_node_list;  // allocated below
   int _numa_used_node_count;
 
@@ -2704,7 +2695,7 @@
     }
   }
 
-public:
+ public:
   NUMANodeListHolder() {
     _numa_used_node_count = 0;
     _numa_used_node_list = NULL;
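
A hedged sketch of the Kernel32 queries a node list like the one above can be built from (assuming a Windows release that exports them): print every NUMA node that actually has processors.

  #include <windows.h>
  #include <cstdio>

  static void list_numa_nodes() {
    ULONG highest = 0;
    if (!GetNumaHighestNodeNumber(&highest)) return;
    for (ULONG node = 0; node <= highest; node++) {
      ULONGLONG mask = 0;
      // Nodes without processors are skipped, as in the holder class above.
      if (GetNumaNodeProcessorMask((UCHAR)node, &mask) && mask != 0) {
        printf("node %lu has processors (mask 0x%llx)\n", node, mask);
      }
    }
  }
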
@@ -2752,7 +2743,7 @@
 
 static bool request_lock_memory_privilege() {
   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
-                                os::current_process_id());
+                          os::current_process_id());
 
   LUID luid;
   if (_hProcess != NULL &&
@@ -2788,7 +2779,7 @@
 
   // print a warning if UseNUMAInterleaving flag is specified on command line
   bool warn_on_failure = use_numa_interleaving_specified;
-# define WARN(msg) if (warn_on_failure) { warning(msg); }
+#define WARN(msg) if (warn_on_failure) { warning(msg); }
 
   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
@@ -2822,8 +2813,9 @@
 // Reasons for doing this:
 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
 //  * UseNUMAInterleaving requires a separate node for each piece
-static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
-                                         bool should_inject_error=false) {
+static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
+                                         DWORD prot,
+                                         bool should_inject_error = false) {
   char * p_buf;
   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
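
A simplified sketch, not the HotSpot implementation, of the reserve-then-commit-piecewise idea behind allocate_pages_individually(); each committed piece could then, for example, be placed on a different NUMA node.

  #include <windows.h>

  static char* commit_in_chunks(size_t bytes, size_t chunk) {
    char* base = (char*)VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
    if (base == NULL) return NULL;
    for (size_t done = 0; done < bytes; done += chunk) {
      size_t len = (bytes - done < chunk) ? (bytes - done) : chunk;
      if (VirtualAlloc(base + done, len, MEM_COMMIT, PAGE_READWRITE) == NULL) {
        VirtualFree(base, 0, MEM_RELEASE);  // roll the whole range back
        return NULL;
      }
    }
    return base;
  }
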
@@ -2912,7 +2904,7 @@
         // need to create a dummy 'reserve' record to match
         // the release.
         MemTracker::record_virtual_memory_reserve((address)p_buf,
-          bytes_to_release, CALLER_PC);
+                                                  bytes_to_release, CALLER_PC);
         os::release_memory(p_buf, bytes_to_release);
       }
 #ifdef ASSERT
@@ -2951,7 +2943,7 @@
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
   bool success = false;
 
-# define WARN(msg) if (warn_on_failure) { warning(msg); }
+#define WARN(msg) if (warn_on_failure) { warning(msg); }
   if (resolve_functions_for_large_page_init()) {
     if (request_lock_memory_privilege()) {
       size_t s = os::Kernel32Dll::GetLargePageMinimum();
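
Large pages require the "lock pages in memory" privilege; a sketch of the token sequence request_lock_memory_privilege() performs, with simplified cleanup (standard Advapi32 calls):

  #include <windows.h>

  static bool enable_lock_memory_privilege() {
    HANDLE token = NULL;
    if (!OpenProcessToken(GetCurrentProcess(),
                          TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) {
      return false;
    }
    LUID luid;
    bool ok = LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &luid) != 0;
    if (ok) {
      TOKEN_PRIVILEGES tp;
      tp.PrivilegeCount = 1;
      tp.Privileges[0].Luid = luid;
      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
      // AdjustTokenPrivileges can succeed without assigning the privilege,
      // so GetLastError() must be checked as well.
      ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL) != 0 &&
           GetLastError() == ERROR_SUCCESS;
    }
    CloseHandle(token);
    return ok;
  }
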
@@ -2996,7 +2988,7 @@
 // all or nothing deal.  When we split a reservation, we must break the
 // reservation into two reservations.
 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
-                              bool realloc) {
+                                  bool realloc) {
   if (size > 0) {
     release_memory(base, size);
     if (realloc) {
@@ -3013,7 +3005,7 @@
 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
-      "Alignment must be a multiple of allocation granularity (page size)");
+         "Alignment must be a multiple of allocation granularity (page size)");
   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 
   size_t extra_size = size + alignment;
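
The function relies on an over-reserve trick; here is a sketch under the same assumptions. Another thread can grab the range between the release and the re-reserve, hence the retry loop mentioned in the comment above.

  #include <windows.h>
  #include <cstdint>

  static char* reserve_aligned(size_t size, size_t alignment) {
    for (int attempt = 0; attempt < 10; attempt++) {
      char* raw = (char*)VirtualAlloc(NULL, size + alignment,
                                      MEM_RESERVE, PAGE_NOACCESS);
      if (raw == NULL) return NULL;
      uintptr_t aligned = ((uintptr_t)raw + alignment - 1) & ~(alignment - 1);
      VirtualFree(raw, 0, MEM_RELEASE);            // give the range back,
      char* result = (char*)VirtualAlloc((void*)aligned, size,
                                         MEM_RESERVE, PAGE_NOACCESS);
      if (result != NULL) return result;           // then retake it aligned
    }
    return NULL;  // repeatedly lost the race
  }
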
@@ -3092,7 +3084,8 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
+                                 bool exec) {
   assert(UseLargePages, "only for large pages");
 
   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
@@ -3107,7 +3100,7 @@
   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
     if (TracePageSizes && Verbose) {
-       tty->print_cr("Reserving large pages individually.");
+      tty->print_cr("Reserving large pages individually.");
     }
     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
     if (p_buf == NULL) {
@@ -3126,7 +3119,7 @@
 
   } else {
     if (TracePageSizes && Verbose) {
-       tty->print_cr("Reserving large pages in a single large chunk.");
+      tty->print_cr("Reserving large pages in a single large chunk.");
     }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -3219,7 +3212,7 @@
 }
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
+                          bool exec) {
   // alignment_hint is ignored on this OS
   return pd_commit_memory(addr, size, exec);
 }
@@ -3328,7 +3321,8 @@
   return false;
 }
 
-char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
+char *os::scan_pages(char *start, char* end, page_info* page_expected,
+                     page_info* page_found) {
   return end;
 }
 
@@ -3367,9 +3361,9 @@
   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
   // resolution timers running.
-private:
-    jlong resolution;
-public:
+ private:
+  jlong resolution;
+ public:
   HighResolutionInterval(jlong ms) {
     resolution = ms % 10L;
     if (resolution != 0) {
@@ -3389,8 +3383,9 @@
 
   while (ms > limit) {
     int res;
-    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
+    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
       return res;
+    }
     ms -= limit;
   }
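
A usage sketch of the RAII idea behind HighResolutionInterval above, built directly on winmm's timeBeginPeriod/timeEndPeriod; the struct and function names are illustrative.

  #include <windows.h>
  #include <mmsystem.h>  // timeBeginPeriod/timeEndPeriod, link winmm.lib

  struct ScopedTimerResolution {
    UINT period;
    explicit ScopedTimerResolution(UINT ms) : period(ms) {
      timeBeginPeriod(period);  // raise the tick rate for this scope only
    }
    ~ScopedTimerResolution() {
      timeEndPeriod(period);    // restore it on the way out
    }
  };

  static void precise_sleep(DWORD ms) {
    ScopedTimerResolution res(1);  // 1ms ticks while we sleep
    Sleep(ms);
  }
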
 
@@ -3410,8 +3405,9 @@
     HANDLE events[1];
     events[0] = osthread->interrupt_event();
     HighResolutionInterval *phri=NULL;
-    if (!ForceTimeHighResolution)
+    if (!ForceTimeHighResolution) {
       phri = new HighResolutionInterval(ms);
+    }
     if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
       result = OS_TIMEOUT;
     } else {
@@ -3431,7 +3427,6 @@
   return result;
 }
 
-//
 // Short sleep, direct OS call.
 //
 // ms = 0, means allow others (if any) to run.
@@ -3514,7 +3509,8 @@
   return ret ? OS_OK : OS_ERR;
 }
 
-OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
+OSReturn os::get_native_priority(const Thread* const thread,
+                                 int* priority_ptr) {
   if (!UseThreadPriorities) {
     *priority_ptr = java_to_os_priority[NormPriority];
     return OS_OK;
@@ -3534,7 +3530,8 @@
 void os::hint_no_preempt() {}
 
 void os::interrupt(Thread* thread) {
-  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
+  assert(!thread->is_Java_thread() || Thread::current() == thread ||
+         Threads_lock->owned_by_self(),
          "possibility of dangling Thread pointer");
 
   OSThread* osthread = thread->osthread();
@@ -3546,12 +3543,12 @@
   OrderAccess::release();
   SetEvent(osthread->interrupt_event());
   // For JSR166:  unpark after setting status
-  if (thread->is_Java_thread())
+  if (thread->is_Java_thread()) {
     ((JavaThread*)thread)->parker()->unpark();
+  }
 
   ParkEvent * ev = thread->_ParkEvent;
   if (ev != NULL) ev->unpark();
-
 }
 
 
@@ -3598,35 +3595,34 @@
 }
 
 // GetCurrentThreadId() returns DWORD
-intx os::current_thread_id()          { return GetCurrentThreadId(); }
+intx os::current_thread_id()  { return GetCurrentThreadId(); }
 
 static int _initial_pid = 0;
 
-int os::current_process_id()
-{
+int os::current_process_id() {
   return (_initial_pid ? _initial_pid : _getpid());
 }
 
-int    os::win32::_vm_page_size       = 0;
+int    os::win32::_vm_page_size              = 0;
 int    os::win32::_vm_allocation_granularity = 0;
-int    os::win32::_processor_type     = 0;
+int    os::win32::_processor_type            = 0;
 // Processor level is not available on non-NT systems, use vm_version instead
-int    os::win32::_processor_level    = 0;
-julong os::win32::_physical_memory    = 0;
-size_t os::win32::_default_stack_size = 0;
-
-         intx os::win32::_os_thread_limit    = 0;
+int    os::win32::_processor_level           = 0;
+julong os::win32::_physical_memory           = 0;
+size_t os::win32::_default_stack_size        = 0;
+
+intx          os::win32::_os_thread_limit    = 0;
 volatile intx os::win32::_os_thread_count    = 0;
 
-bool   os::win32::_is_nt              = false;
-bool   os::win32::_is_windows_2003    = false;
-bool   os::win32::_is_windows_server  = false;
+bool   os::win32::_is_nt                     = false;
+bool   os::win32::_is_windows_2003           = false;
+bool   os::win32::_is_windows_server         = false;
 
 // 6573254
 // Currently, the bug is observed across all the supported Windows releases,
 // including the latest one (as of this writing - Windows Server 2012 R2)
-bool   os::win32::_has_exit_bug       = true;
-bool   os::win32::_has_performance_count = 0;
+bool   os::win32::_has_exit_bug              = true;
+bool   os::win32::_has_performance_count     = false;
 
 void os::win32::initialize_system_info() {
   SYSTEM_INFO si;
@@ -3649,27 +3645,27 @@
   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
   GetVersionEx((OSVERSIONINFO*)&oi);
   switch (oi.dwPlatformId) {
-    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
-    case VER_PLATFORM_WIN32_NT:
-      _is_nt = true;
-      {
-        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
-        if (os_vers == 5002) {
-          _is_windows_2003 = true;
-        }
-        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
+  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
+  case VER_PLATFORM_WIN32_NT:
+    _is_nt = true;
+    {
+      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
+      if (os_vers == 5002) {
+        _is_windows_2003 = true;
+      }
+      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
           oi.wProductType == VER_NT_SERVER) {
-            _is_windows_server = true;
-        }
+        _is_windows_server = true;
       }
-      break;
-    default: fatal("Unknown platform");
+    }
+    break;
+  default: fatal("Unknown platform");
   }
 
   _default_stack_size = os::current_stack_size();
   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
-    "stack size not a multiple of page size");
+         "stack size not a multiple of page size");
 
   initialize_performance_counter();
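
A worked note on the version arithmetic above: dwMajorVersion * 1000 + dwMinorVersion folds the version pair into one directly comparable integer.

  // Windows Server 2003 is version 5.2:     5 * 1000 + 2 == 5002
  // Windows 7 / Server 2008 R2 is 6.1:      6 * 1000 + 1 == 6001
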
 
@@ -3684,7 +3680,8 @@
 }
 
 
-HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
+HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
+                                      int ebuflen) {
   char path[MAX_PATH];
   DWORD size;
   DWORD pathLen = (DWORD)sizeof(path);
@@ -3695,7 +3692,7 @@
   assert(strchr(name, ':') == NULL, "path not allowed");
   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
     jio_snprintf(ebuf, ebuflen,
-      "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
+                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
     return NULL;
   }
 
@@ -3718,7 +3715,7 @@
   }
 
   jio_snprintf(ebuf, ebuflen,
-    "os::win32::load_windows_dll() cannot load %s from system directories.", name);
+               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
   return NULL;
 }
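
A sketch of the "system directories only" policy load_Windows_dll() enforces: build an absolute path under the system directory and hand LoadLibrary a full path, so the default DLL search order cannot be hijacked. Buffer handling here is simplified.

  #include <windows.h>
  #include <cstdio>
  #include <cstring>

  static HMODULE load_system_dll(const char* name) {
    char path[MAX_PATH];
    UINT len = GetSystemDirectoryA(path, sizeof(path));
    if (len == 0 || len + strlen(name) + 2 > sizeof(path)) return NULL;
    snprintf(path + len, sizeof(path) - len, "\\%s", name);
    return LoadLibraryA(path);  // absolute path: no search-order surprises
  }
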
 
@@ -3765,17 +3762,17 @@
   }
 
   switch (what) {
-    case EPT_THREAD:
-      _endthreadex((unsigned)exit_code);
-      break;
-
-    case EPT_PROCESS:
-      ::exit(exit_code);
-      break;
-
-    case EPT_PROCESS_DIE:
-      _exit(exit_code);
-      break;
+  case EPT_THREAD:
+    _endthreadex((unsigned)exit_code);
+    break;
+
+  case EPT_PROCESS:
+    ::exit(exit_code);
+    break;
+
+  case EPT_PROCESS_DIE:
+    _exit(exit_code);
+    break;
   }
 
   // should not reach here
@@ -3870,11 +3867,11 @@
 
   // This may be overridden later when argument processing is done.
   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
-    os::win32::is_windows_2003());
+                os::win32::is_windows_2003());
 
   // Initialize main_process and main_thread
   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
- if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
+  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
     fatal("DuplicateHandle failed\n");
   }
@@ -3902,8 +3899,10 @@
   os::set_polling_page(polling_page);
 
 #ifndef PRODUCT
-  if (Verbose && PrintMiscellaneous)
-    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
+  if (Verbose && PrintMiscellaneous) {
+    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
+               (intptr_t)polling_page);
+  }
 #endif
 
   if (!UseMembar) {
@@ -3916,8 +3915,10 @@
     os::set_memory_serialize_page(mem_serialize_page);
 
 #ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous)
-      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+    if (Verbose && PrintMiscellaneous) {
+      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
+                 (intptr_t)mem_serialize_page);
+    }
 #endif
   }
 
@@ -3957,7 +3958,7 @@
   // class initialization depending on 32 or 64 bit VM.
   size_t min_stack_allowed =
             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
   if (actual_reserve_size < min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, "
                   "Specify at least %dk",
@@ -4032,16 +4033,20 @@
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
   DWORD old_status;
-  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status))
+  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
+                      PAGE_NOACCESS, &old_status)) {
     fatal("Could not disable polling page");
-};
+  }
+}
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
   DWORD old_status;
-  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status))
+  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
+                      PAGE_READONLY, &old_status)) {
     fatal("Could not enable polling page");
-};
+  }
+}
 
 
 int os::stat(const char *path, struct stat *sbuf) {
@@ -4117,15 +4122,14 @@
     FILETIME KernelTime;
     FILETIME UserTime;
 
-    if (GetThreadTimes(thread->osthread()->thread_handle(),
-                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
+    if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
+                       &ExitTime, &KernelTime, &UserTime) == 0) {
       return -1;
-    else
-      if (user_sys_cpu_time) {
-        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
-      } else {
-        return FT2INT64(UserTime) * 100;
-      }
+    } else if (user_sys_cpu_time) {
+      return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
+    } else {
+      return FT2INT64(UserTime) * 100;
+    }
   } else {
     return (jlong) timeGetTime() * 1000000;
   }
@@ -4153,11 +4157,12 @@
     FILETIME KernelTime;
     FILETIME UserTime;
 
-    if (GetThreadTimes(GetCurrentThread(),
-                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
+    if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
+                       &KernelTime, &UserTime) == 0) {
       return false;
-    else
+    } else {
       return true;
+    }
   } else {
     return false;
   }
@@ -4202,7 +4207,7 @@
 
   if (strlen(path) > MAX_PATH - 1) {
     errno = ENAMETOOLONG;
-          return -1;
+    return -1;
   }
   os::native_path(strcpy(pathbuf, path));
   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
@@ -4250,39 +4255,36 @@
 // This method is a slightly reworked copy of JDK's sysNativePath
 // from src/windows/hpi/src/path_md.c
 
-/* Convert a pathname to native format.  On win32, this involves forcing all
-   separators to be '\\' rather than '/' (both are legal inputs, but Win95
-   sometimes rejects '/') and removing redundant separators.  The input path is
-   assumed to have been converted into the character encoding used by the local
-   system.  Because this might be a double-byte encoding, care is taken to
-   treat double-byte lead characters correctly.
-
-   This procedure modifies the given path in place, as the result is never
-   longer than the original.  There is no error return; this operation always
-   succeeds. */
+// Convert a pathname to native format.  On win32, this involves forcing all
+// separators to be '\\' rather than '/' (both are legal inputs, but Win95
+// sometimes rejects '/') and removing redundant separators.  The input path is
+// assumed to have been converted into the character encoding used by the local
+// system.  Because this might be a double-byte encoding, care is taken to
+// treat double-byte lead characters correctly.
+//
+// This procedure modifies the given path in place, as the result is never
+// longer than the original.  There is no error return; this operation always
+// succeeds.
 char * os::native_path(char *path) {
   char *src = path, *dst = path, *end = path;
-  char *colon = NULL;           /* If a drive specifier is found, this will
-                                        point to the colon following the drive
-                                        letter */
-
-  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
-  assert(((!::IsDBCSLeadByte('/'))
-    && (!::IsDBCSLeadByte('\\'))
-    && (!::IsDBCSLeadByte(':'))),
-    "Illegal lead byte");
-
-  /* Check for leading separators */
+  char *colon = NULL;  // If a drive specifier is found, this will
+                       // point to the colon following the drive letter
+
+  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
+  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
+          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
+
+  // Check for leading separators
 #define isfilesep(c) ((c) == '/' || (c) == '\\')
   while (isfilesep(*src)) {
     src++;
   }
 
   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
-    /* Remove leading separators if followed by drive specifier.  This
-      hack is necessary to support file URLs containing drive
-      specifiers (e.g., "file://c:/path").  As a side effect,
-      "/c:/path" can be used as an alternative to "c:/path". */
+    // Remove leading separators if followed by drive specifier.  This
+    // hack is necessary to support file URLs containing drive
+    // specifiers (e.g., "file://c:/path").  As a side effect,
+    // "/c:/path" can be used as an alternative to "c:/path".
     *dst++ = *src++;
     colon = dst;
     *dst++ = ':';
@@ -4290,55 +4292,55 @@
   } else {
     src = path;
     if (isfilesep(src[0]) && isfilesep(src[1])) {
-      /* UNC pathname: Retain first separator; leave src pointed at
-         second separator so that further separators will be collapsed
-         into the second separator.  The result will be a pathname
-         beginning with "\\\\" followed (most likely) by a host name. */
+      // UNC pathname: Retain first separator; leave src pointed at
+      // second separator so that further separators will be collapsed
+      // into the second separator.  The result will be a pathname
+      // beginning with "\\\\" followed (most likely) by a host name.
       src = dst = path + 1;
-      path[0] = '\\';     /* Force first separator to '\\' */
+      path[0] = '\\';     // Force first separator to '\\'
     }
   }
 
   end = dst;
 
-  /* Remove redundant separators from remainder of path, forcing all
-      separators to be '\\' rather than '/'. Also, single byte space
-      characters are removed from the end of the path because those
-      are not legal ending characters on this operating system.
-  */
+  // Remove redundant separators from remainder of path, forcing all
+  // separators to be '\\' rather than '/'. Also, single byte space
+  // characters are removed from the end of the path because those
+  // are not legal ending characters on this operating system.
+  //
   while (*src != '\0') {
     if (isfilesep(*src)) {
       *dst++ = '\\'; src++;
       while (isfilesep(*src)) src++;
       if (*src == '\0') {
-        /* Check for trailing separator */
+        // Check for trailing separator
         end = dst;
-        if (colon == dst - 2) break;                      /* "z:\\" */
-        if (dst == path + 1) break;                       /* "\\" */
+        if (colon == dst - 2) break;  // "z:\\"
+        if (dst == path + 1) break;   // "\\"
         if (dst == path + 2 && isfilesep(path[0])) {
-          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
-            beginning of a UNC pathname.  Even though it is not, by
-            itself, a valid UNC pathname, we leave it as is in order
-            to be consistent with the path canonicalizer as well
-            as the win32 APIs, which treat this case as an invalid
-            UNC pathname rather than as an alias for the root
-            directory of the current drive. */
+          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
+          // beginning of a UNC pathname.  Even though it is not, by
+          // itself, a valid UNC pathname, we leave it as is in order
+          // to be consistent with the path canonicalizer as well
+          // as the win32 APIs, which treat this case as an invalid
+          // UNC pathname rather than as an alias for the root
+          // directory of the current drive.
           break;
         }
-        end = --dst;  /* Path does not denote a root directory, so
-                                    remove trailing separator */
+        end = --dst;  // Path does not denote a root directory, so
+                      // remove trailing separator
         break;
       }
       end = dst;
     } else {
-      if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
+      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
         *dst++ = *src++;
         if (*src) *dst++ = *src++;
         end = dst;
-      } else {         /* Copy a single-byte character */
+      } else {  // Copy a single-byte character
         char c = *src++;
         *dst++ = c;
-        /* Space is not a legal ending character */
+        // Space is not a legal ending character
         if (c != ' ') end = dst;
       }
     }
@@ -4346,10 +4348,10 @@
 
   *end = '\0';
 
-  /* For "z:", add "." to work around a bug in the C runtime library */
+  // For "z:", add "." to work around a bug in the C runtime library
   if (colon == dst - 1) {
-          path[2] = '.';
-          path[3] = '\0';
+    path[2] = '.';
+    path[3] = '\0';
   }
 
   return path;
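
Worked examples derived from the rules documented above (outputs follow from the stated transformations, written with single backslashes):

  os::native_path("/c:/path")   -> "c:\path"    // separator before a drive dropped
  os::native_path("c://a///b/") -> "c:\a\b"     // separators collapsed, trailing one cut
  os::native_path("//host/x")   -> "\\host\x"   // UNC prefix preserved
  os::native_path("z:")         -> "z:."        // CRT workaround appends "."
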
@@ -4369,7 +4371,7 @@
 
   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
-      return -1;
+    return -1;
   }
 
   if (::SetEndOfFile(h) == FALSE) {
@@ -4388,8 +4390,8 @@
   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
 
   if ((!::FlushFileBuffers(handle)) &&
-         (GetLastError() != ERROR_ACCESS_DENIED) ) {
-    /* from winerror.h */
+      (GetLastError() != ERROR_ACCESS_DENIED)) {
+    // from winerror.h
     return -1;
   }
   return 0;
@@ -4439,12 +4441,10 @@
 // from src/windows/hpi/src/sys_api_md.c
 
 static int nonSeekAvailable(int fd, long *pbytes) {
-  /* This is used for available on non-seekable devices
-    * (like both named and anonymous pipes, such as pipes
-    *  connected to an exec'd process).
-    * Standard Input is a special case.
-    *
-    */
+  // This is used for available on non-seekable devices
+  // (like both named and anonymous pipes, such as pipes
+  //  connected to an exec'd process).
+  // Standard Input is a special case.
   HANDLE han;
 
   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
@@ -4452,12 +4452,12 @@
   }
 
   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
-        /* PeekNamedPipe fails when at EOF.  In that case we
-         * simply make *pbytes = 0 which is consistent with the
-         * behavior we get on Solaris when an fd is at EOF.
-         * The only alternative is to raise an Exception,
-         * which isn't really warranted.
-         */
+    // PeekNamedPipe fails when at EOF.  In that case we
+    // simply make *pbytes = 0 which is consistent with the
+    // behavior we get on Solaris when an fd is at EOF.
+    // The only alternative is to raise an Exception,
+    // which isn't really warranted.
+    //
     if (::GetLastError() != ERROR_BROKEN_PIPE) {
       return FALSE;
     }
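
A minimal sketch of the probe described above: PeekNamedPipe() reports the bytes buffered in a pipe without consuming them, and a broken pipe (EOF) is translated into "zero bytes available" rather than an error.

  #include <windows.h>

  static bool pipe_available(HANDLE pipe, long* pbytes) {
    DWORD avail = 0;
    if (!PeekNamedPipe(pipe, NULL, 0, NULL, &avail, NULL)) {
      if (GetLastError() != ERROR_BROKEN_PIPE) return false;
      avail = 0;  // at EOF report 0, matching the Solaris behavior
    }
    *pbytes = (long)avail;
    return true;
  }
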
@@ -4473,25 +4473,25 @@
 
 static int stdinAvailable(int fd, long *pbytes) {
   HANDLE han;
-  DWORD numEventsRead = 0;      /* Number of events read from buffer */
-  DWORD numEvents = 0;  /* Number of events in buffer */
-  DWORD i = 0;          /* Loop index */
-  DWORD curLength = 0;  /* Position marker */
-  DWORD actualLength = 0;       /* Number of bytes readable */
-  BOOL error = FALSE;         /* Error holder */
-  INPUT_RECORD *lpBuffer;     /* Pointer to records of input events */
+  DWORD numEventsRead = 0;  // Number of events read from buffer
+  DWORD numEvents = 0;      // Number of events in buffer
+  DWORD i = 0;              // Loop index
+  DWORD curLength = 0;      // Position marker
+  DWORD actualLength = 0;   // Number of bytes readable
+  BOOL error = FALSE;       // Error holder
+  INPUT_RECORD *lpBuffer;   // Pointer to records of input events
 
   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
-        return FALSE;
-  }
-
-  /* Construct an array of input records in the console buffer */
+    return FALSE;
+  }
+
+  // Construct an array of input records in the console buffer
   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
   if (error == 0) {
     return nonSeekAvailable(fd, pbytes);
   }
 
-  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
+  // lpBuffer must fit into 64K or else PeekConsoleInput fails
   if (numEvents > MAX_INPUT_EVENTS) {
     numEvents = MAX_INPUT_EVENTS;
   }
@@ -4507,7 +4507,7 @@
     return FALSE;
   }
 
-  /* Examine input records for the number of bytes available */
+  // Examine input records for the number of bytes available
   for (i=0; i<numEvents; i++) {
     if (lpBuffer[i].EventType == KEY_EVENT) {
 
@@ -4533,8 +4533,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   HANDLE hFile;
   char* base;
 
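
For orientation, a stripped-down sketch of the CreateFile -> CreateFileMapping -> MapViewOfFile sequence pd_map_memory() builds on (read-only, whole file; the real code also handles copy-on-write, exec permission and fixed addresses):

  #include <windows.h>

  static void* map_file_readonly(const char* file_name,
                                 HANDLE* out_file, HANDLE* out_mapping) {
    HANDLE file = CreateFileA(file_name, GENERIC_READ, FILE_SHARE_READ,
                              NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    if (file == INVALID_HANDLE_VALUE) return NULL;
    HANDLE mapping = CreateFileMappingA(file, NULL, PAGE_READONLY, 0, 0, NULL);
    if (mapping == NULL) { CloseHandle(file); return NULL; }
    void* base = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, 0);
    if (base == NULL) { CloseHandle(mapping); CloseHandle(file); return NULL; }
    *out_file = file; *out_mapping = mapping;  // caller unmaps and closes
    return base;
  }
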
@@ -4589,7 +4589,7 @@
     }
   } else {
     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
-                                    NULL /*file_name*/);
+                                    NULL /* file_name */);
     if (hMap == NULL) {
       if (PrintMiscellaneous && Verbose) {
         DWORD err = GetLastError();
@@ -4653,8 +4653,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // This OS does not allow existing memory maps to be remapped so we
   // have to unmap the memory before we remap it.
   if (!os::unmap_memory(addr, bytes)) {
@@ -4666,7 +4666,7 @@
   // code may be able to access an address that is no longer mapped.
 
   return os::map_memory(fd, file_name, file_offset, addr, bytes,
-           read_only, allow_exec);
+                        read_only, allow_exec);
 }
 
 
@@ -4702,7 +4702,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -4710,17 +4710,16 @@
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
 
-/*
- * See the caveats for this class in os_windows.hpp
- * Protects the callback call so that raised OS EXCEPTIONS causes a jump back
- * into this method and returns false. If no OS EXCEPTION was raised, returns
- * true.
- * The callback is supposed to provide the method that should be protected.
- */
+// See the caveats for this class in os_windows.hpp
+// Protects the callback call so that a raised OS EXCEPTION causes a jump back
+// into this method, which then returns false. If no OS EXCEPTION was raised,
+// it returns true.
+// The callback is supposed to provide the method that should be protected.
+//
 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
-      "crash_protection already set?");
+         "crash_protection already set?");
 
   bool success = true;
   __try {
@@ -4785,86 +4784,86 @@
 // Another possible encoding of _Event would be
 // with explicit "PARKED" and "SIGNALED" bits.
 
-int os::PlatformEvent::park (jlong Millis) {
-    guarantee(_ParkHandle != NULL , "Invariant");
-    guarantee(Millis > 0          , "Invariant");
-    int v;
-
-    // CONSIDER: defer assigning a CreateEvent() handle to the Event until
-    // the initial park() operation.
-
-    for (;;) {
-        v = _Event;
-        if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+int os::PlatformEvent::park(jlong Millis) {
+  guarantee(_ParkHandle != NULL , "Invariant");
+  guarantee(Millis > 0          , "Invariant");
+  int v;
+
+  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
+  // the initial park() operation.
+
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+  }
+  guarantee((v == 0) || (v == 1), "invariant");
+  if (v != 0) return OS_OK;
+
+  // Do this the hard way by blocking ...
+  // TODO: consider a brief spin here, gated on the success of recent
+  // spin attempts by this thread.
+  //
+  // We decompose long timeouts into a series of shorter timed waits.
+  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
+  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
+  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
+  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
+  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
+  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
+  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
+  // for the already waited time.  This policy does not admit any new outcomes.
+  // In the future, however, we might want to track the accumulated wait time and
+  // adjust Millis accordingly if we encounter a spurious wakeup.
+
+  const int MAXTIMEOUT = 0x10000000;
+  DWORD rv = WAIT_TIMEOUT;
+  while (_Event < 0 && Millis > 0) {
+    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
+    if (Millis > MAXTIMEOUT) {
+      prd = MAXTIMEOUT;
     }
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (v != 0) return OS_OK;
-
-    // Do this the hard way by blocking ...
-    // TODO: consider a brief spin here, gated on the success of recent
-    // spin attempts by this thread.
-    //
-    // We decompose long timeouts into series of shorter timed waits.
-    // Evidently large timo values passed in WaitForSingleObject() are problematic on some
-    // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
-    // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
-    // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
-    // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
-    // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
-    // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
-    // for the already waited time.  This policy does not admit any new outcomes.
-    // In the future, however, we might want to track the accumulated wait time and
-    // adjust Millis accordingly if we encounter a spurious wakeup.
-
-    const int MAXTIMEOUT = 0x10000000;
-    DWORD rv = WAIT_TIMEOUT;
-    while (_Event < 0 && Millis > 0) {
-       DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
-       if (Millis > MAXTIMEOUT) {
-          prd = MAXTIMEOUT;
-       }
-       rv = ::WaitForSingleObject(_ParkHandle, prd);
-       assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
-       if (rv == WAIT_TIMEOUT) {
-           Millis -= prd;
-       }
+    rv = ::WaitForSingleObject(_ParkHandle, prd);
+    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
+    if (rv == WAIT_TIMEOUT) {
+      Millis -= prd;
     }
-    v = _Event;
-    _Event = 0;
-    // see comment at end of os::PlatformEvent::park() below:
-    OrderAccess::fence();
-    // If we encounter a nearly simultanous timeout expiry and unpark()
-    // we return OS_OK indicating we awoke via unpark().
-    // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
-    return (v >= 0) ? OS_OK : OS_TIMEOUT;
+  }
+  v = _Event;
+  _Event = 0;
+  // see comment at end of os::PlatformEvent::park() below:
+  OrderAccess::fence();
+  // If we encounter a nearly simultaneous timeout expiry and unpark()
+  // we return OS_OK indicating we awoke via unpark().
+  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
+  return (v >= 0) ? OS_OK : OS_TIMEOUT;
 }
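
A self-contained sketch of the decomposition strategy the comments above describe: wait in bounded slices so no single WaitForSingleObject() call sees an oversized timeout, and stop as soon as the event fires.

  #include <windows.h>

  static bool wait_in_slices(HANDLE event, long long millis) {
    const long long MAXTIMEOUT = 0x10000000;  // same cap as above
    while (millis > 0) {
      DWORD slice = (DWORD)((millis > MAXTIMEOUT) ? MAXTIMEOUT : millis);
      DWORD rv = WaitForSingleObject(event, slice);
      if (rv == WAIT_OBJECT_0) return true;   // signaled
      if (rv != WAIT_TIMEOUT)  return false;  // wait failed outright
      millis -= slice;                        // timed out: wait the remainder
    }
    return false;  // overall timeout expired
  }
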
 
 void os::PlatformEvent::park() {
-    guarantee(_ParkHandle != NULL, "Invariant");
-    // Invariant: Only the thread associated with the Event/PlatformEvent
-    // may call park().
-    int v;
-    for (;;) {
-        v = _Event;
-        if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
-    }
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (v != 0) return;
-
-    // Do this the hard way by blocking ...
-    // TODO: consider a brief spin here, gated on the success of recent
-    // spin attempts by this thread.
-    while (_Event < 0) {
-       DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
-       assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
-    }
-
-    // Usually we'll find _Event == 0 at this point, but as
-    // an optional optimization we clear it, just in case can
-    // multiple unpark() operations drove _Event up to 1.
-    _Event = 0;
-    OrderAccess::fence();
-    guarantee(_Event >= 0, "invariant");
+  guarantee(_ParkHandle != NULL, "Invariant");
+  // Invariant: Only the thread associated with the Event/PlatformEvent
+  // may call park().
+  int v;
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+  }
+  guarantee((v == 0) || (v == 1), "invariant");
+  if (v != 0) return;
+
+  // Do this the hard way by blocking ...
+  // TODO: consider a brief spin here, gated on the success of recent
+  // spin attempts by this thread.
+  while (_Event < 0) {
+    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
+    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
+  }
+
+  // Usually we'll find _Event == 0 at this point, but as
+  // an optional optimization we clear it, just in case multiple
+  // unpark() operations drove _Event up to 1.
+  _Event = 0;
+  OrderAccess::fence();
+  guarantee(_Event >= 0, "invariant");
 }
 
 void os::PlatformEvent::unpark() {
@@ -4893,32 +4892,28 @@
 // JSR166
 // -------------------------------------------------------
 
-/*
- * The Windows implementation of Park is very straightforward: Basic
- * operations on Win32 Events turn out to have the right semantics to
- * use them directly. We opportunistically resuse the event inherited
- * from Monitor.
- */
-
+// The Windows implementation of Park is very straightforward: Basic
+// operations on Win32 Events turn out to have the right semantics to
+// use them directly. We opportunistically reuse the event inherited
+// from Monitor.
 
 void Parker::park(bool isAbsolute, jlong time) {
   guarantee(_ParkEvent != NULL, "invariant");
   // First, demultiplex/decode time arguments
   if (time < 0) { // don't wait
     return;
-  }
-  else if (time == 0 && !isAbsolute) {
+  } else if (time == 0 && !isAbsolute) {
     time = INFINITE;
-  }
-  else if  (isAbsolute) {
+  } else if (isAbsolute) {
     time -= os::javaTimeMillis(); // convert to relative time
-    if (time <= 0) // already elapsed
+    if (time <= 0) {  // already elapsed
       return;
-  }
-  else { // relative
-    time /= 1000000; // Must coarsen from nanos to millis
-    if (time == 0)   // Wait for the minimal time unit if zero
+    }
+  } else { // relative
+    time /= 1000000;  // Must coarsen from nanos to millis
+    if (time == 0) {  // Wait for the minimal time unit if zero
       time = 1;
+    }
   }
 
   JavaThread* thread = (JavaThread*)(Thread::current());
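
A sketch of the time decoding above, pulled out as a pure function; the sentinel values and names are illustrative, not HotSpot's. Parker::park() accepts either an absolute deadline in milliseconds or a relative timeout in nanoseconds and normalizes both to a relative wait.

  // Returns millis to wait; -1 means "don't wait", -2 means "wait forever".
  static long long decode_park_time(bool is_absolute, long long time,
                                    long long now_millis) {
    if (time < 0) return -1;                   // negative: don't wait
    if (time == 0 && !is_absolute) return -2;  // zero relative: INFINITE
    if (is_absolute) {
      long long rel = time - now_millis;       // deadline -> relative
      return (rel <= 0) ? -1 : rel;            // already elapsed
    }
    long long ms = time / 1000000;             // nanos -> millis
    return (ms == 0) ? 1 : ms;                 // minimum one time unit
  }
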
@@ -4927,11 +4922,10 @@
 
   // Don't wait if interrupted or already triggered
   if (Thread::is_interrupted(thread, false) ||
-    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
+      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
     ResetEvent(_ParkEvent);
     return;
-  }
-  else {
+  } else {
     ThreadBlockInVM tbivm(jt);
     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
     jt->set_suspend_equivalent();
@@ -5040,8 +5034,9 @@
     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
     address addr = (address) exceptionRecord->ExceptionInformation[1];
 
-    if (os::is_memory_serialize_page(thread, addr))
+    if (os::is_memory_serialize_page(thread, addr)) {
       return EXCEPTION_CONTINUE_EXECUTION;
+    }
   }
 
   return EXCEPTION_CONTINUE_SEARCH;
@@ -5055,13 +5050,13 @@
 
   if (!os::WinSock2Dll::WinSock2Available()) {
     jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
-      ::GetLastError());
+                ::GetLastError());
     return JNI_ERR;
   }
 
   if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
-      ::GetLastError());
+                ::GetLastError());
     return JNI_ERR;
   }
   return JNI_OK;
@@ -5161,9 +5156,9 @@
 
 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
 #if defined(IA32)
-#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
+  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
 #elif defined (AMD64)
-#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
+  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
 #endif
 
 // returns true if thread could be suspended,
@@ -5187,13 +5182,13 @@
 
 // retrieve a suspend/resume context capable handle
 // from the tid. Caller validates handle return value.
-void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
+void get_thread_handle_for_extended_context(HANDLE* h,
+                                            OSThread::thread_id_t tid) {
   if (h != NULL) {
     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
   }
 }
 
-//
 // Thread sampling implementation
 //
 void os::SuspendedThreadTask::internal_do_task() {
@@ -5227,9 +5222,9 @@
 
 // Kernel32 API
 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
-typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
-typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
-typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
+typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn)(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
+typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn)(PULONG);
+typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn)(UCHAR, PULONGLONG);
 typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);
 
 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
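
These typedefs let the VM bind optional Kernel32 exports at runtime instead of importing them at link time; a sketch of the pattern (real API names, simplified caching, a locally renamed typedef):

  #include <windows.h>

  typedef SIZE_T (WINAPI* GetLargePageMinimum_t)(void);

  static SIZE_T large_page_minimum_or_zero() {
    HMODULE k32 = GetModuleHandleA("kernel32.dll");
    GetLargePageMinimum_t fn = (k32 == NULL) ? NULL :
        (GetLargePageMinimum_t)GetProcAddress(k32, "GetLargePageMinimum");
    return (fn != NULL) ? fn() : 0;  // 0: export absent, no large pages
  }
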
@@ -5242,7 +5237,7 @@
 BOOL                        os::Kernel32Dll::initialized = FALSE;
 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
   assert(initialized && _GetLargePageMinimum != NULL,
-    "GetLargePageMinimumAvailable() not yet called");
+         "GetLargePageMinimumAvailable() not yet called");
   return _GetLargePageMinimum();
 }
 
@@ -5260,39 +5255,44 @@
   return _VirtualAllocExNuma != NULL;
 }
 
-LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
+LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr,
+                                           SIZE_T bytes, DWORD flags,
+                                           DWORD prot, DWORD node) {
   assert(initialized && _VirtualAllocExNuma != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
 }
 
 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
   assert(initialized && _GetNumaHighestNodeNumber != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _GetNumaHighestNodeNumber(ptr_highest_node_number);
 }
 
-BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
+BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node,
+                                               PULONGLONG proc_mask) {
   assert(initialized && _GetNumaNodeProcessorMask != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _GetNumaNodeProcessorMask(node, proc_mask);
 }
 
 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
-  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
-    if (!initialized) {
-      initialize();
-    }
-
-    if (_RtlCaptureStackBackTrace != NULL) {
-      return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
-        BackTrace, BackTraceHash);
-    } else {
-      return 0;
-    }
+                                                 ULONG FrameToCapture,
+                                                 PVOID* BackTrace,
+                                                 PULONG BackTraceHash) {
+  if (!initialized) {
+    initialize();
+  }
+
+  if (_RtlCaptureStackBackTrace != NULL) {
+    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
+                                     BackTrace, BackTraceHash);
+  } else {
+    return 0;
+  }
 }
 
 void os::Kernel32Dll::initializeCommon() {
@@ -5326,20 +5326,23 @@
   return true;
 }
 
-  // Help tools
+// Help tools
 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
   return true;
 }
 
-inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
+inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
+                                                        DWORD th32ProcessId) {
   return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
 }
 
-inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,
+                                           LPMODULEENTRY32 lpme) {
   return ::Module32First(hSnapshot, lpme);
 }
 
-inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
+                                          LPMODULEENTRY32 lpme) {
   return ::Module32Next(hSnapshot, lpme);
 }
 
@@ -5353,15 +5356,23 @@
 }
 
 // PSAPI API
-inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
+inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess,
+                                             HMODULE *lpModule, DWORD cb,
+                                             LPDWORD lpcbNeeded) {
   return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
 }
 
-inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
+inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess,
+                                               HMODULE hModule,
+                                               LPTSTR lpFilename,
+                                               DWORD nSize) {
   return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
 }
 
-inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
+inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess,
+                                               HMODULE hModule,
+                                               LPMODULEINFO lpmodinfo,
+                                               DWORD cb) {
   return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
 }
 
@@ -5371,7 +5382,8 @@
 
 
 // WinSock2 API
-inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
+inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested,
+                                        LPWSADATA lpWSAData) {
   return ::WSAStartup(wVersionRequested, lpWSAData);
 }
 
@@ -5385,18 +5397,24 @@
 
 // Advapi API
 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
-   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
-   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
-     return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
-       BufferLength, PreviousState, ReturnLength);
-}
-
-inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
-  PHANDLE TokenHandle) {
-    return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
-}
-
-inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
+                                                   BOOL DisableAllPrivileges,
+                                                   PTOKEN_PRIVILEGES NewState,
+                                                   DWORD BufferLength,
+                                                   PTOKEN_PRIVILEGES PreviousState,
+                                                   PDWORD ReturnLength) {
+  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+                                 BufferLength, PreviousState, ReturnLength);
+}
+
+inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
+                                              DWORD DesiredAccess,
+                                              PHANDLE TokenHandle) {
+  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+}
+
+inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
+                                                  LPCTSTR lpName,
+                                                  PLUID lpLuid) {
   return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
 }
 
@@ -5476,9 +5494,9 @@
 #else
 // Kernel32 API
 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
-typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
-typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
-typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
+typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
+typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
+typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
 
 SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
@@ -5506,7 +5524,7 @@
 
 BOOL os::Kernel32Dll::SwitchToThread() {
   assert(initialized && _SwitchToThread != NULL,
-    "SwitchToThreadAvailable() not yet called");
+         "SwitchToThreadAvailable() not yet called");
   return _SwitchToThread();
 }
 
@@ -5528,23 +5546,25 @@
          _Module32Next != NULL;
 }
 
-HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
+HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
+                                                 DWORD th32ProcessId) {
   assert(initialized && _CreateToolhelp32Snapshot != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
 }
 
 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
   assert(initialized && _Module32First != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _Module32First(hSnapshot, lpme);
 }
 
-inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
+inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
+                                          LPMODULEENTRY32 lpme) {
   assert(initialized && _Module32Next != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _Module32Next(hSnapshot, lpme);
 }
@@ -5559,7 +5579,7 @@
 
 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
   assert(initialized && _GetNativeSystemInfo != NULL,
-    "GetNativeSystemInfoAvailable() not yet called");
+         "GetNativeSystemInfoAvailable() not yet called");
 
   _GetNativeSystemInfo(lpSystemInfo);
 }
@@ -5568,7 +5588,7 @@
 
 
 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
-typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
+typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
 
 EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
@@ -5581,11 +5601,11 @@
     HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
     if (handle != NULL) {
       _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
-        "EnumProcessModules");
+                                                                    "EnumProcessModules");
       _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
-        "GetModuleFileNameExA");
+                                                                      "GetModuleFileNameExA");
       _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
-        "GetModuleInformation");
+                                                                        "GetModuleInformation");
     }
     initialized = TRUE;
   }
@@ -5593,21 +5613,24 @@
 
 
 
-BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
+BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule,
+                                      DWORD cb, LPDWORD lpcbNeeded) {
   assert(initialized && _EnumProcessModules != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
 }
 
-DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
+DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule,
+                                        LPTSTR lpFilename, DWORD nSize) {
   assert(initialized && _GetModuleFileNameEx != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
 }
 
-BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
+BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule,
+                                        LPMODULEINFO lpmodinfo, DWORD cb) {
   assert(initialized && _GetModuleInformation != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
 }
 
@@ -5643,13 +5666,13 @@
 
 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
   assert(initialized && _WSAStartup != NULL,
-    "WinSock2Available() not yet called");
+         "WinSock2Available() not yet called");
   return _WSAStartup(wVersionRequested, lpWSAData);
 }
 
 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
   assert(initialized && _gethostbyname != NULL,
-    "WinSock2Available() not yet called");
+         "WinSock2Available() not yet called");
   return _gethostbyname(name);
 }
 
@@ -5675,35 +5698,40 @@
     HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
     if (handle != NULL) {
       _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
-        "AdjustTokenPrivileges");
+                                                                          "AdjustTokenPrivileges");
       _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
-        "OpenProcessToken");
+                                                                "OpenProcessToken");
       _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
-        "LookupPrivilegeValueA");
+                                                                        "LookupPrivilegeValueA");
     }
     initialized = TRUE;
   }
 }
 
 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
-   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
-   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
-   assert(initialized && _AdjustTokenPrivileges != NULL,
-     "AdvapiAvailable() not yet called");
-   return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
-       BufferLength, PreviousState, ReturnLength);
-}
-
-BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
-  PHANDLE TokenHandle) {
-   assert(initialized && _OpenProcessToken != NULL,
-     "AdvapiAvailable() not yet called");
-    return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
-}
-
-BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
-   assert(initialized && _LookupPrivilegeValue != NULL,
-     "AdvapiAvailable() not yet called");
+                                            BOOL DisableAllPrivileges,
+                                            PTOKEN_PRIVILEGES NewState,
+                                            DWORD BufferLength,
+                                            PTOKEN_PRIVILEGES PreviousState,
+                                            PDWORD ReturnLength) {
+  assert(initialized && _AdjustTokenPrivileges != NULL,
+         "AdvapiAvailable() not yet called");
+  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+                                BufferLength, PreviousState, ReturnLength);
+}
+
+BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
+                                       DWORD DesiredAccess,
+                                       PHANDLE TokenHandle) {
+  assert(initialized && _OpenProcessToken != NULL,
+         "AdvapiAvailable() not yet called");
+  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+}
+
+BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
+                                           LPCTSTR lpName, PLUID lpLuid) {
+  assert(initialized && _LookupPrivilegeValue != NULL,
+         "AdvapiAvailable() not yet called");
   return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
 }
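
The three wrappers above (PSApiDll, WinSock2Dll, Advapi32Dll) share one lazy-binding pattern: load the DLL once, resolve each symbol with GetProcAddress, and record initialization even on failure so callers can assert on the individual function pointers. A minimal sketch of the pattern; "SOME.DLL" and SomeFn are hypothetical stand-ins:

static SomeFn_Fn _SomeFn      = NULL;
static BOOL      initialized  = FALSE;
typedef BOOL (WINAPI *SomeFn_Fn)(HANDLE);

static void initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("SOME.DLL", NULL, 0);
    if (handle != NULL) {
      _SomeFn = (SomeFn_Fn)::GetProcAddress(handle, "SomeFn");
    }
    initialized = TRUE;  // set even on failure; callers assert the pointer
  }
}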
 
@@ -5751,7 +5779,7 @@
   if (result == NULL) {
     if (VerboseInternalVMTests) {
       gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
-        large_allocation_size);
+                          large_allocation_size);
     }
   } else {
     os::release_memory_special(result, large_allocation_size);
@@ -5764,15 +5792,15 @@
     if (actual_location == NULL) {
       if (VerboseInternalVMTests) {
         gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
-          expected_location, large_allocation_size);
+                            expected_location, large_allocation_size);
       }
     } else {
       // release memory
       os::release_memory_special(actual_location, expected_allocation_size);
       // only now check, after releasing any memory to avoid any leaks.
       assert(actual_location == expected_location,
-        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
-          expected_location, expected_allocation_size, actual_location));
+             err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+             expected_location, expected_allocation_size, actual_location));
     }
   }
 
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -74,12 +74,12 @@
   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
   inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
   // See comment above about using jlong atomics on 32-bit platforms
-         static jlong    add    (jlong    add_value, volatile jlong*    dest);
+  static jlong           add    (jlong    add_value, volatile jlong*    dest);
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
   inline static void inc    (volatile jint*     dest);
-         static void inc    (volatile jshort*   dest);
+  static void        inc    (volatile jshort*   dest);
   inline static void inc    (volatile size_t*   dest);
   inline static void inc_ptr(volatile intptr_t* dest);
   inline static void inc_ptr(volatile void*     dest);
@@ -87,7 +87,7 @@
   // Atomically decrement a location. dec*() provide:
   // <fence> decrement-dest <membar StoreLoad|StoreStore>
   inline static void dec    (volatile jint*     dest);
-         static void dec    (volatile jshort*    dest);
+  static void        dec    (volatile jshort*   dest);
   inline static void dec    (volatile size_t*   dest);
   inline static void dec_ptr(volatile intptr_t* dest);
   inline static void dec_ptr(volatile void*     dest);
@@ -95,27 +95,22 @@
   // Performs atomic exchange of *dest with exchange_value. Returns old
   // prior value of *dest. xchg*() provide:
   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
-  inline static jint         xchg(jint         exchange_value, volatile jint*         dest);
-         static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
-
-  inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
-  inline static void*    xchg_ptr(void*    exchange_value, volatile void*   dest);
+  inline static jint     xchg    (jint         exchange_value, volatile jint*         dest);
+  static unsigned int    xchg    (unsigned int exchange_value, volatile unsigned int* dest);
+  inline static intptr_t xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
+  inline static void*    xchg_ptr(void*        exchange_value, volatile void*         dest);
 
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-         static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
-  inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
+  static jbyte           cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value);
+  inline static jint     cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value);
   // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
-
-         static unsigned int cmpxchg(unsigned int exchange_value,
-                                     volatile unsigned int* dest,
-                                     unsigned int compare_value);
-
-  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
-  inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value);
+  inline static jlong    cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value);
+  static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value);
+  inline static intptr_t cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value);
+  inline static void*    cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value);
 };
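
The cmpxchg() contract documented above (return the prior value of *dest, store exchange_value only if it matched compare_value) is what makes CAS loops work. A hedged usage sketch, a bounded increment that Atomic::add() alone cannot express:

static jint bounded_inc(volatile jint* dest, jint max) {
  for (;;) {
    jint old = *dest;
    if (old >= max) return old;                      // already saturated
    if (Atomic::cmpxchg(old + 1, dest, old) == old) {
      return old + 1;                                // CAS succeeded
    }
    // Another thread won the race; reload and retry.
  }
}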
 
 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
@@ -129,12 +124,12 @@
 //  );
 
 #ifdef VM_LITTLE_ENDIAN
-#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
-    non_atomic_decl; \
+  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
+    non_atomic_decl;                                       \
     atomic_decl
 #else
-#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
-    atomic_decl; \
+  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
+    atomic_decl;                                           \
     non_atomic_decl
 #endif
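
A declaration sketch (field names hypothetical) of how ATOMIC_SHORT_PAIR is meant to be used: the two jshorts together occupy one naturally aligned jint, and the macro orders them by VM_LITTLE_ENDIAN so the atomically updated member lands in the half the platform's jshort atomics expect:

class TaggedCounter {
 public:
  ATOMIC_SHORT_PAIR(
    volatile jshort _count,  // updated via Atomic::inc()/Atomic::dec()
    jshort _pad              // plain jshort completing the 4-byte pair
  );
};

After such a declaration, Atomic::inc(&obj._count) is the intended call.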
 
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -267,15 +267,24 @@
 // CASPTR() uses the canonical argument order that dominates in the literature.
 // Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
 
-#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
+#define CASPTR(a, c, s)  \
+  intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
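
CASPTR's canonical (address, compare, set) order reads as: return the prior value of *a, setting *a to s only if it was c. A usage sketch in the style of AcquireOrPush() below:

  intptr_t v = _LockWord.FullWord;
  if ((v & _LBIT) == 0 && CASPTR(&_LockWord, v, v | _LBIT) == v) {
    // CAS succeeded: the lock bit is now set; v was the prior word.
  }
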
 #define UNS(x) (uintptr_t(x))
-#define TRACE(m) { static volatile int ctr = 0; int x = ++ctr; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
+#define TRACE(m)                   \
+  {                                \
+    static volatile int ctr = 0;   \
+    int x = ++ctr;                 \
+    if ((x & (x - 1)) == 0) {      \
+      ::printf("%d:%s\n", x, #m);  \
+      ::fflush(stdout);            \
+    }                              \
+  }
 
 // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
 // Bijective except for the trailing mask operation.
 // Useful for spin loops as the compiler can't optimize it away.
 
-static inline jint MarsagliaXORV (jint x) {
+static inline jint MarsagliaXORV(jint x) {
   if (x == 0) x = 1|os::random();
   x ^= x << 6;
   x ^= ((unsigned)x) >> 21;
@@ -283,7 +292,7 @@
   return x & 0x7FFFFFFF;
 }
 
-static int Stall (int its) {
+static int Stall(int its) {
   static volatile jint rv = 1;
   volatile int OnFrame = 0;
   jint v = rv ^ UNS(OnFrame);
@@ -341,7 +350,7 @@
 // Clamp spinning at approximately 1/2 of a context-switch round-trip.
 // See synchronizer.cpp for details and rationale.
 
-int Monitor::TrySpin (Thread * const Self) {
+int Monitor::TrySpin(Thread * const Self) {
   if (TryLock())    return 1;
   if (!os::is_MP()) return 0;
 
@@ -403,11 +412,11 @@
   }
 }
 
-static int ParkCommon (ParkEvent * ev, jlong timo) {
+static int ParkCommon(ParkEvent * ev, jlong timo) {
   // Diagnostic support - periodically unwedge blocked threads
   intx nmt = NativeMonitorTimeout;
   if (nmt > 0 && (nmt < timo || timo <= 0)) {
-     timo = nmt;
+    timo = nmt;
   }
   int err = OS_OK;
   if (0 == timo) {
@@ -418,7 +427,7 @@
   return err;
 }
 
-inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
+inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
   intptr_t v = _LockWord.FullWord;
   for (;;) {
     if ((v & _LBIT) == 0) {
@@ -443,7 +452,7 @@
 // Note that ILock and IWait do *not* access _owner.
 // _owner is a higher-level logical concept.
 
-void Monitor::ILock (Thread * Self) {
+void Monitor::ILock(Thread * Self) {
   assert(_OnDeck != Self->_MutexEvent, "invariant");
 
   if (TryFast()) {
@@ -514,7 +523,7 @@
   goto Exeunt;
 }
 
-void Monitor::IUnlock (bool RelaxAssert) {
+void Monitor::IUnlock(bool RelaxAssert) {
   assert(ILocked(), "invariant");
   // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
   // before the store that releases the lock.  Crucially, all the stores and loads in the
@@ -589,8 +598,8 @@
     _EntryList = w->ListNext;
     // as a diagnostic measure consider setting w->_ListNext = BAD
     assert(UNS(_OnDeck) == _LBIT, "invariant");
-    _OnDeck = w;           // pass OnDeck to w.
-                            // w will clear OnDeck once it acquires the outer lock
+    _OnDeck = w;  // pass OnDeck to w.
+                  // w will clear OnDeck once it acquires the outer lock
 
     // Another optional optimization ...
     // For heavily contended locks it's not uncommon that some other
@@ -724,7 +733,7 @@
   return true;
 }
 
-int Monitor::IWait (Thread * Self, jlong timo) {
+int Monitor::IWait(Thread * Self, jlong timo) {
   assert(ILocked(), "invariant");
 
   // Phases:
@@ -885,7 +894,7 @@
 // sneaking or dependence on any clever invariants or subtle implementation properties
 // of Mutex-Monitor and instead directly address the underlying design flaw.
 
-void Monitor::lock (Thread * Self) {
+void Monitor::lock(Thread * Self) {
 #ifdef CHECK_UNHANDLED_OOPS
   // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
   // or GC threads.
@@ -895,7 +904,7 @@
 #endif // CHECK_UNHANDLED_OOPS
 
   debug_only(check_prelock_state(Self));
-  assert(_owner != Self              , "invariant");
+  assert(_owner != Self, "invariant");
   assert(_OnDeck != Self->_MutexEvent, "invariant");
 
   if (TryFast()) {
@@ -943,7 +952,7 @@
 // that is guaranteed not to block while running inside the VM. If this is called with
 // thread state set to be in VM, the safepoint synchronization code will deadlock!
 
-void Monitor::lock_without_safepoint_check (Thread * Self) {
+void Monitor::lock_without_safepoint_check(Thread * Self) {
   assert(_owner != Self, "invariant");
   ILock(Self);
   assert(_owner == NULL, "invariant");
@@ -983,8 +992,8 @@
 }
 
 void Monitor::unlock() {
-  assert(_owner  == Thread::current(), "invariant");
-  assert(_OnDeck != Thread::current()->_MutexEvent , "invariant");
+  assert(_owner == Thread::current(), "invariant");
+  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
   set_owner(NULL);
   if (_snuck) {
     assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
@@ -1071,7 +1080,8 @@
   IUnlock(false);
 }
 
-bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
+bool Monitor::wait(bool no_safepoint_check, long timeout,
+                   bool as_suspend_equivalent) {
   Thread * const Self = Thread::current();
   assert(_owner == Self, "invariant");
   assert(ILocked(), "invariant");
@@ -1082,14 +1092,14 @@
   guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
 
   #ifdef ASSERT
-    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
-    assert(least != this, "Specification of get_least_... call above");
-    if (least != NULL && least->rank() <= special) {
-      tty->print("Attempting to wait on monitor %s/%d while holding"
-                 " lock %s/%d -- possible deadlock",
-                 name(), rank(), least->name(), least->rank());
-      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
-    }
+  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
+  assert(least != this, "Specification of get_least_... call above");
+  if (least != NULL && least->rank() <= special) {
+    tty->print("Attempting to wait on monitor %s/%d while holding"
+               " lock %s/%d -- possible deadlock",
+               name(), rank(), least->name(), least->rank());
+    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
+  }
   #endif // ASSERT
 
   int wait_status;
@@ -1140,7 +1150,7 @@
   assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
 }
 
-void Monitor::ClearMonitor (Monitor * m, const char *name) {
+void Monitor::ClearMonitor(Monitor * m, const char *name) {
   m->_owner             = NULL;
   m->_snuck             = false;
   if (name == NULL) {
@@ -1158,7 +1168,7 @@
 
 Monitor::Monitor() { ClearMonitor(this); }
 
-Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
+Monitor::Monitor(int Rank, const char * name, bool allow_vm_block) {
   ClearMonitor(this, name);
 #ifdef ASSERT
   _allow_vm_block  = allow_vm_block;
@@ -1170,11 +1180,11 @@
   assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
 }
 
-Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
+Mutex::Mutex(int Rank, const char * name, bool allow_vm_block) {
   ClearMonitor((Monitor *) this, name);
 #ifdef ASSERT
- _allow_vm_block   = allow_vm_block;
- _rank             = Rank;
+  _allow_vm_block   = allow_vm_block;
+  _rank             = Rank;
 #endif
 }
 
@@ -1247,8 +1257,9 @@
 
 bool Monitor::contains(Monitor* locks, Monitor * lock) {
   for (; locks != NULL; locks = locks->next()) {
-    if (locks == lock)
+    if (locks == lock) {
       return true;
+    }
   }
   return false;
 }
@@ -1279,40 +1290,40 @@
 
     // link "this" into the owned locks list
 
-    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
-      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
-                    // Mutex::set_owner_implementation is a friend of Thread
+#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
+    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
+    // Mutex::set_owner_implementation is a friend of Thread
 
-      assert(this->rank() >= 0, "bad lock rank");
+    assert(this->rank() >= 0, "bad lock rank");
 
-      // Deadlock avoidance rules require us to acquire Mutexes only in
-      // a global total order. For example m1 is the lowest ranked mutex
-      // that the thread holds and m2 is the mutex the thread is trying
-      // to acquire, then  deadlock avoidance rules require that the rank
-      // of m2 be less  than the rank of m1.
-      // The rank Mutex::native  is an exception in that it is not subject
-      // to the verification rules.
-      // Here are some further notes relating to mutex acquisition anomalies:
-      // . under Solaris, the interrupt lock gets acquired when doing
-      //   profiling, so any lock could be held.
-      // . it is also ok to acquire Safepoint_lock at the very end while we
-      //   already hold Terminator_lock - may happen because of periodic safepoints
-      if (this->rank() != Mutex::native &&
-          this->rank() != Mutex::suspend_resume &&
-          locks != NULL && locks->rank() <= this->rank() &&
-          !SafepointSynchronize::is_at_safepoint() &&
-          this != Interrupt_lock && this != ProfileVM_lock &&
-          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
-            SafepointSynchronize::is_synchronizing())) {
-        new_owner->print_owned_locks();
-        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
-                      "possible deadlock", this->name(), this->rank(),
-                      locks->name(), locks->rank()));
-      }
+    // Deadlock avoidance rules require us to acquire Mutexes only in
+    // a global total order. For example, if m1 is the lowest ranked mutex
+    // that the thread holds and m2 is the mutex the thread is trying
+    // to acquire, then deadlock avoidance rules require that the rank
+    // of m2 be less than the rank of m1.
+    // The rank Mutex::native is an exception in that it is not subject
+    // to the verification rules.
+    // Here are some further notes relating to mutex acquisition anomalies:
+    // . under Solaris, the interrupt lock gets acquired when doing
+    //   profiling, so any lock could be held.
+    // . it is also ok to acquire Safepoint_lock at the very end while we
+    //   already hold Terminator_lock - may happen because of periodic safepoints
+    if (this->rank() != Mutex::native &&
+        this->rank() != Mutex::suspend_resume &&
+        locks != NULL && locks->rank() <= this->rank() &&
+        !SafepointSynchronize::is_at_safepoint() &&
+        this != Interrupt_lock && this != ProfileVM_lock &&
+        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
+        SafepointSynchronize::is_synchronizing())) {
+      new_owner->print_owned_locks();
+      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
+                    "possible deadlock", this->name(), this->rank(),
+                    locks->name(), locks->rank()));
+    }
 
-      this->_next = new_owner->_owned_locks;
-      new_owner->_owned_locks = this;
-    #endif
+    this->_next = new_owner->_owned_locks;
+    new_owner->_owned_locks = this;
+#endif
 
   } else {
     // the thread is releasing this lock
@@ -1325,27 +1336,27 @@
 
     _owner = NULL; // set the owner
 
-    #ifdef ASSERT
-      Monitor *locks = old_owner->owned_locks();
+#ifdef ASSERT
+    Monitor *locks = old_owner->owned_locks();
 
-      // remove "this" from the owned locks list
+    // remove "this" from the owned locks list
 
-      Monitor *prev = NULL;
-      bool found = false;
-      for (; locks != NULL; prev = locks, locks = locks->next()) {
-        if (locks == this) {
-          found = true;
-          break;
-        }
+    Monitor *prev = NULL;
+    bool found = false;
+    for (; locks != NULL; prev = locks, locks = locks->next()) {
+      if (locks == this) {
+        found = true;
+        break;
       }
-      assert(found, "Removing a lock not owned");
-      if (prev == NULL) {
-        old_owner->_owned_locks = _next;
-      } else {
-        prev->_next = _next;
-      }
-      _next = NULL;
-    #endif
+    }
+    assert(found, "Removing a lock not owned");
+    if (prev == NULL) {
+      old_owner->_owned_locks = _next;
+    } else {
+      prev->_next = _next;
+    }
+    _next = NULL;
+#endif
   }
 }
 
@@ -1360,11 +1371,11 @@
                     name()));
     }
     debug_only(if (rank() != Mutex::special) \
-      thread->check_for_valid_safepoint_state(false);)
+               thread->check_for_valid_safepoint_state(false);)
   }
   if (thread->is_Watcher_thread()) {
     assert(!WatcherThread::watcher_thread()->has_crash_protection(),
-        "locking not allowed when crash protection is set");
+           "locking not allowed when crash protection is set");
   }
 }
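
The deadlock-avoidance rank rule enforced in set_owner_implementation() above can be made concrete with a small sketch; the lock names and numeric ranks here are invented:

static void rank_order_sketch() {
  Mutex coarse(7, "coarse_lock", true /* allow_vm_block */);
  Mutex fine  (3, "fine_lock",   true);

  coarse.lock();
  fine.lock();      // OK: rank(fine) < rank(coarse), matching the global order
  fine.unlock();
  coarse.unlock();
  // Acquiring the locks in the opposite order would trip the out-of-order
  // fatal() check above in ASSERT builds.
}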
 
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -45,7 +45,7 @@
 #include "utilities/preserveException.hpp"
 
 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
-  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+// Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define NOINLINE __attribute__((noinline))
 #else
   #define NOINLINE
@@ -70,10 +70,10 @@
 
 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
   {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
+    if (DTraceMonitorProbes) {                                             \
       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
       HOTSPOT_MONITOR_WAIT(jtid,                                           \
-                       (monitor), bytes, len, (millis));                   \
+                           (monitor), bytes, len, (millis));               \
     }                                                                      \
   }
 
@@ -85,10 +85,10 @@
 
 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
   {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
+    if (DTraceMonitorProbes) {                                             \
       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
-      HOTSPOT_MONITOR_##probe(jtid,                                               \
-                       (uintptr_t)(monitor), bytes, len);                  \
+      HOTSPOT_MONITOR_##probe(jtid,                                        \
+                              (uintptr_t)(monitor), bytes, len);           \
     }                                                                      \
   }
 
@@ -254,11 +254,11 @@
 bool ObjectMonitor::try_enter(Thread* THREAD) {
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;
-       _recursions = 1;
-       OwnerIsThread = 1;
-       return true;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;
+      _recursions = 1;
+      OwnerIsThread = 1;
+      return true;
     }
     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
       return false;
@@ -277,17 +277,17 @@
 
   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   if (cur == NULL) {
-     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
-     assert(_recursions == 0   , "invariant");
-     assert(_owner      == Self, "invariant");
-     // CONSIDER: set or assert OwnerIsThread == 1
-     return;
+    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+    assert(_recursions == 0, "invariant");
+    assert(_owner == Self, "invariant");
+    // CONSIDER: set or assert OwnerIsThread == 1
+    return;
   }
 
   if (cur == Self) {
-     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
-     _recursions++;
-     return;
+    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
+    _recursions++;
+    return;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
@@ -310,20 +310,20 @@
   // Note that if we acquire the monitor from an initial spin
   // we forgo posting JVMTI events and firing DTRACE probes.
   if (Knob_SpinEarly && TrySpin (Self) > 0) {
-     assert(_owner == Self      , "invariant");
-     assert(_recursions == 0    , "invariant");
-     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-     Self->_Stalled = 0;
-     return;
+    assert(_owner == Self, "invariant");
+    assert(_recursions == 0, "invariant");
+    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+    Self->_Stalled = 0;
+    return;
   }
 
-  assert(_owner != Self          , "invariant");
-  assert(_succ  != Self          , "invariant");
-  assert(Self->is_Java_thread()  , "invariant");
+  assert(_owner != Self, "invariant");
+  assert(_succ != Self, "invariant");
+  assert(Self->is_Java_thread(), "invariant");
   JavaThread * jt = (JavaThread *) Self;
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(jt->thread_state() != _thread_blocked   , "invariant");
-  assert(this->object() != NULL  , "invariant");
+  assert(jt->thread_state() != _thread_blocked, "invariant");
+  assert(this->object() != NULL, "invariant");
   assert(_count >= 0, "invariant");
 
   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
@@ -361,13 +361,12 @@
 
       if (!ExitSuspendEquivalent(jt)) break;
 
-      //
       // We have acquired the contended monitor, but while we were
       // waiting another thread suspended us. We don't want to enter
       // the monitor while suspended because that would surprise the
       // thread that suspended us.
       //
-          _recursions = 0;
+      _recursions = 0;
       _succ = NULL;
       exit(false, Self);
 
@@ -390,9 +389,9 @@
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
-  assert(_recursions == 0     , "invariant");
-  assert(_owner == Self       , "invariant");
-  assert(_succ  != Self       , "invariant");
+  assert(_recursions == 0, "invariant");
+  assert(_owner == Self, "invariant");
+  assert(_succ != Self, "invariant");
   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 
   // The thread -- now the owner -- is back in vm mode.
@@ -426,7 +425,7 @@
   }
 
   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
-     ObjectMonitor::_sync_ContendedLockAttempts->inc();
+    ObjectMonitor::_sync_ContendedLockAttempts->inc();
   }
 }
 
@@ -434,7 +433,7 @@
 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 // Callers must compensate as needed.
 
-int ObjectMonitor::TryLock (Thread * Self) {
+int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
@@ -451,245 +450,244 @@
   return -1;
 }
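
Per the caveat above, a failed TryLock() may return after nothing more than a plain load of _owner, so a caller that needs memory ordering across the failed attempt must supply its own barrier. A hedged sketch of such a caller (hypothetical helper, not VM code):

static void try_then_slow_path(ObjectMonitor* mon, Thread* Self) {
  if (mon->TryLock(Self) > 0) {
    return;              // acquired; the successful CAS already serialized
  }
  OrderAccess::fence();  // compensate: the failure path need not serialize
  // ... fall through to the contended path (enqueue on _cxq and park) ...
}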
 
-void NOINLINE ObjectMonitor::EnterI (TRAPS) {
-    Thread * const Self = THREAD;
-    assert(Self->is_Java_thread(), "invariant");
-    assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
+void NOINLINE ObjectMonitor::EnterI(TRAPS) {
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "invariant");
+  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 
-    // Try the lock - TATAS
-    if (TryLock (Self) > 0) {
-        assert(_succ != Self              , "invariant");
-        assert(_owner == Self             , "invariant");
-        assert(_Responsible != Self       , "invariant");
-        return;
-    }
+  // Try the lock - TATAS
+  if (TryLock (Self) > 0) {
+    assert(_succ != Self, "invariant");
+    assert(_owner == Self, "invariant");
+    assert(_Responsible != Self, "invariant");
+    return;
+  }
 
-    DeferredInitialize();
+  DeferredInitialize();
 
-    // We try one round of spinning *before* enqueueing Self.
-    //
-    // If the _owner is ready but OFFPROC we could use a YieldTo()
-    // operation to donate the remainder of this thread's quantum
-    // to the owner.  This has subtle but beneficial affinity
-    // effects.
+  // We try one round of spinning *before* enqueueing Self.
+  //
+  // If the _owner is ready but OFFPROC we could use a YieldTo()
+  // operation to donate the remainder of this thread's quantum
+  // to the owner.  This has subtle but beneficial affinity
+  // effects.
 
-    if (TrySpin (Self) > 0) {
-        assert(_owner == Self        , "invariant");
-        assert(_succ != Self         , "invariant");
-        assert(_Responsible != Self  , "invariant");
-        return;
-    }
+  if (TrySpin (Self) > 0) {
+    assert(_owner == Self, "invariant");
+    assert(_succ != Self, "invariant");
+    assert(_Responsible != Self, "invariant");
+    return;
+  }
+
+  // The Spin failed -- Enqueue and park the thread ...
+  assert(_succ != Self, "invariant");
+  assert(_owner != Self, "invariant");
+  assert(_Responsible != Self, "invariant");
 
-    // The Spin failed -- Enqueue and park the thread ...
-    assert(_succ  != Self            , "invariant");
-    assert(_owner != Self            , "invariant");
-    assert(_Responsible != Self      , "invariant");
+  // Enqueue "Self" on ObjectMonitor's _cxq.
+  //
+  // Node acts as a proxy for Self.
+  // As an aside, if we were ever to rewrite the synchronization code mostly
+  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+  // Java objects.  This would avoid awkward lifecycle and liveness issues,
+  // as well as eliminate a subset of ABA issues.
+  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
 
-    // Enqueue "Self" on ObjectMonitor's _cxq.
-    //
-    // Node acts as a proxy for Self.
-    // As an aside, if were to ever rewrite the synchronization code mostly
-    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
-    // Java objects.  This would avoid awkward lifecycle and liveness issues,
-    // as well as eliminate a subset of ABA issues.
-    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
-    //
+  ObjectWaiter node(Self);
+  Self->_ParkEvent->reset();
+  node._prev   = (ObjectWaiter *) 0xBAD;
+  node.TState  = ObjectWaiter::TS_CXQ;
+
+  // Push "Self" onto the front of the _cxq.
+  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+  // Note that spinning tends to reduce the rate at which threads
+  // enqueue and dequeue on EntryList|cxq.
+  ObjectWaiter * nxt;
+  for (;;) {
+    node._next = nxt = _cxq;
+    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
 
-    ObjectWaiter node(Self);
-    Self->_ParkEvent->reset();
-    node._prev   = (ObjectWaiter *) 0xBAD;
-    node.TState  = ObjectWaiter::TS_CXQ;
-
-    // Push "Self" onto the front of the _cxq.
-    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
-    // Note that spinning tends to reduce the rate at which threads
-    // enqueue and dequeue on EntryList|cxq.
-    ObjectWaiter * nxt;
-    for (;;) {
-        node._next = nxt = _cxq;
-        if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
-
-        // Interference - the CAS failed because _cxq changed.  Just retry.
-        // As an optional optimization we retry the lock.
-        if (TryLock (Self) > 0) {
-            assert(_succ != Self         , "invariant");
-            assert(_owner == Self        , "invariant");
-            assert(_Responsible != Self  , "invariant");
-            return;
-        }
+    // Interference - the CAS failed because _cxq changed.  Just retry.
+    // As an optional optimization we retry the lock.
+    if (TryLock (Self) > 0) {
+      assert(_succ != Self, "invariant");
+      assert(_owner == Self, "invariant");
+      assert(_Responsible != Self, "invariant");
+      return;
     }
+  }
 
-    // Check for cxq|EntryList edge transition to non-null.  This indicates
-    // the onset of contention.  While contention persists exiting threads
-    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
-    // operations revert to the faster 1-0 mode.  This enter operation may interleave
-    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
-    // arrange for one of the contending thread to use a timed park() operations
-    // to detect and recover from the race.  (Stranding is form of progress failure
-    // where the monitor is unlocked but all the contending threads remain parked).
-    // That is, at least one of the contended threads will periodically poll _owner.
-    // One of the contending threads will become the designated "Responsible" thread.
-    // The Responsible thread uses a timed park instead of a normal indefinite park
-    // operation -- it periodically wakes and checks for and recovers from potential
-    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
-    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
-    // be responsible for a monitor.
-    //
-    // Currently, one of the contended threads takes on the added role of "Responsible".
-    // A viable alternative would be to use a dedicated "stranding checker" thread
-    // that periodically iterated over all the threads (or active monitors) and unparked
-    // successors where there was risk of stranding.  This would help eliminate the
-    // timer scalability issues we see on some platforms as we'd only have one thread
-    // -- the checker -- parked on a timer.
+  // Check for cxq|EntryList edge transition to non-null.  This indicates
+  // the onset of contention.  While contention persists exiting threads
+  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+  // operations revert to the faster 1-0 mode.  This enter operation may interleave
+  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+  // arrange for one of the contending threads to use a timed park() operation
+  // to detect and recover from the race.  (Stranding is a form of progress failure
+  // where the monitor is unlocked but all the contending threads remain parked).
+  // That is, at least one of the contended threads will periodically poll _owner.
+  // One of the contending threads will become the designated "Responsible" thread.
+  // The Responsible thread uses a timed park instead of a normal indefinite park
+  // operation -- it periodically wakes and checks for and recovers from potential
+  // strandings admitted by 1-0 exit operations.   We need at most one Responsible
+  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
+  // be responsible for a monitor.
+  //
+  // Currently, one of the contended threads takes on the added role of "Responsible".
+  // A viable alternative would be to use a dedicated "stranding checker" thread
+  // that periodically iterated over all the threads (or active monitors) and unparked
+  // successors where there was risk of stranding.  This would help eliminate the
+  // timer scalability issues we see on some platforms as we'd only have one thread
+  // -- the checker -- parked on a timer.
 
-    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
-        // Try to assume the role of responsible thread for the monitor.
-        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-        Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+    // Try to assume the role of responsible thread for the monitor.
+    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
+    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  }
+
+  // The lock might have been released while this thread was occupied queueing
+  // itself onto _cxq.  To close the race and avoid "stranding" and
+  // progress-liveness failure we must resample-retry _owner before parking.
+  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+  // In this case the ST-MEMBAR is accomplished with CAS().
+  //
+  // TODO: Defer all thread state transitions until park-time.
+  // Since state transitions are heavy and inefficient we'd like
+  // to defer the state transitions until absolutely necessary,
+  // and in doing so avoid some transitions ...
+
+  TEVENT(Inflated enter - Contention);
+  int nWakeups = 0;
+  int RecheckInterval = 1;
+
+  for (;;) {
+
+    if (TryLock(Self) > 0) break;
+    assert(_owner != Self, "invariant");
+
+    if ((SyncFlags & 2) && _Responsible == NULL) {
+      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
     }
 
-    // The lock might have been released while this thread was occupied queueing
-    // itself onto _cxq.  To close the race and avoid "stranding" and
-    // progress-liveness failure we must resample-retry _owner before parking.
-    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
-    // In this case the ST-MEMBAR is accomplished with CAS().
-    //
-    // TODO: Defer all thread state transitions until park-time.
-    // Since state transitions are heavy and inefficient we'd like
-    // to defer the state transitions until absolutely necessary,
-    // and in doing so avoid some transitions ...
-
-    TEVENT(Inflated enter - Contention);
-    int nWakeups = 0;
-    int RecheckInterval = 1;
-
-    for (;;) {
-
-        if (TryLock(Self) > 0) break;
-        assert(_owner != Self, "invariant");
-
-        if ((SyncFlags & 2) && _Responsible == NULL) {
-           Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
-        }
-
-        // park self
-        if (_Responsible == Self || (SyncFlags & 1)) {
-            TEVENT(Inflated enter - park TIMED);
-            Self->_ParkEvent->park((jlong) RecheckInterval);
-            // Increase the RecheckInterval, but clamp the value.
-            RecheckInterval *= 8;
-            if (RecheckInterval > 1000) RecheckInterval = 1000;
-        } else {
-            TEVENT(Inflated enter - park UNTIMED);
-            Self->_ParkEvent->park();
-        }
-
-        if (TryLock(Self) > 0) break;
-
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Inflated enter - Futile wakeup);
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-           ObjectMonitor::_sync_FutileWakeups->inc();
-        }
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
-        // We can defer clearing _succ until after the spin completes
-        // TrySpin() must tolerate being called with _succ == Self.
-        // Try yet another round of adaptive spinning.
-        if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
-
-        // We can find that we were unpark()ed and redesignated _succ while
-        // we were spinning.  That's harmless.  If we iterate and call park(),
-        // park() will consume the event and return immediately and we'll
-        // just spin again.  This pattern can repeat, leaving _succ to simply
-        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
-        // Alternately, we can sample fired() here, and if set, forgo spinning
-        // in the next iteration.
-
-        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
-           Self->_ParkEvent->reset();
-           OrderAccess::fence();
-        }
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
-        OrderAccess::fence();
+    // park self
+    if (_Responsible == Self || (SyncFlags & 1)) {
+      TEVENT(Inflated enter - park TIMED);
+      Self->_ParkEvent->park((jlong) RecheckInterval);
+      // Increase the RecheckInterval, but clamp the value.
+      RecheckInterval *= 8;
+      if (RecheckInterval > 1000) RecheckInterval = 1000;
+    } else {
+      TEVENT(Inflated enter - park UNTIMED);
+      Self->_ParkEvent->park();
     }
 
-    // Egress :
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
-    // Normally we'll find Self on the EntryList .
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Inflated enter - Futile wakeup);
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+    ++nWakeups;
 
-    assert(_owner == Self      , "invariant");
-    assert(object() != NULL    , "invariant");
-    // I'd like to write:
-    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-    // but as we're at a safepoint that's not safe.
+    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+    // We can defer clearing _succ until after the spin completes
+    // TrySpin() must tolerate being called with _succ == Self.
+    // Try yet another round of adaptive spinning.
+    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 
-    UnlinkAfterAcquire(Self, &node);
+    // We can find that we were unpark()ed and redesignated _succ while
+    // we were spinning.  That's harmless.  If we iterate and call park(),
+    // park() will consume the event and return immediately and we'll
+    // just spin again.  This pattern can repeat, leaving _succ to simply
+    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
+    // Alternately, we can sample fired() here, and if set, forgo spinning
+    // in the next iteration.
+
+    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+      Self->_ParkEvent->reset();
+      OrderAccess::fence();
+    }
     if (_succ == Self) _succ = NULL;
 
-    assert(_succ != Self, "invariant");
-    if (_Responsible == Self) {
-        _Responsible = NULL;
-        OrderAccess::fence(); // Dekker pivot-point
+    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+    OrderAccess::fence();
+  }
+
+  // Egress:
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+  // Normally we'll find Self on the EntryList.
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
 
-        // We may leave threads on cxq|EntryList without a designated
-        // "Responsible" thread.  This is benign.  When this thread subsequently
-        // exits the monitor it can "see" such preexisting "old" threads --
-        // threads that arrived on the cxq|EntryList before the fence, above --
-        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
-        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
-        // non-null and elect a new "Responsible" timer thread.
-        //
-        // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
-        //    LD cxq|EntryList               (in subsequent exit)
-        //
-        // Entering threads in the slow/contended path execute:
-        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
-        //    The (ST cxq; MEMBAR) is accomplished with CAS().
-        //
-        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
-        // exit operation from floating above the ST Responsible=null.
-    }
+  assert(_owner == Self, "invariant");
+  assert(object() != NULL, "invariant");
+  // I'd like to write:
+  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+  // but as we're at a safepoint that's not safe.
+
+  UnlinkAfterAcquire(Self, &node);
+  if (_succ == Self) _succ = NULL;
+
+  assert(_succ != Self, "invariant");
+  if (_Responsible == Self) {
+    _Responsible = NULL;
+    OrderAccess::fence(); // Dekker pivot-point
 
-    // We've acquired ownership with CAS().
-    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
-    // But since the CAS() this thread may have also stored into _succ,
-    // EntryList, cxq or Responsible.  These meta-data updates must be
-    // visible __before this thread subsequently drops the lock.
-    // Consider what could occur if we didn't enforce this constraint --
-    // STs to monitor meta-data and user-data could reorder with (become
-    // visible after) the ST in exit that drops ownership of the lock.
-    // Some other thread could then acquire the lock, but observe inconsistent
-    // or old monitor meta-data and heap data.  That violates the JMM.
-    // To that end, the 1-0 exit() operation must have at least STST|LDST
-    // "release" barrier semantics.  Specifically, there must be at least a
-    // STST|LDST barrier in exit() before the ST of null into _owner that drops
-    // the lock.   The barrier ensures that changes to monitor meta-data and data
-    // protected by the lock will be visible before we release the lock, and
-    // therefore before some other thread (CPU) has a chance to acquire the lock.
-    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+    // We may leave threads on cxq|EntryList without a designated
+    // "Responsible" thread.  This is benign.  When this thread subsequently
+    // exits the monitor it can "see" such preexisting "old" threads --
+    // threads that arrived on the cxq|EntryList before the fence, above --
+    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
+    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+    // non-null and elect a new "Responsible" timer thread.
+    //
+    // This thread executes:
+    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
+    //    LD cxq|EntryList               (in subsequent exit)
+    //
+    // Entering threads in the slow/contended path execute:
+    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+    //    The (ST cxq; MEMBAR) is accomplished with CAS().
     //
-    // Critically, any prior STs to _succ or EntryList must be visible before
-    // the ST of null into _owner in the *subsequent* (following) corresponding
-    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
-    // execute a serializing instruction.
+    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+    // exit operation from floating above the ST Responsible=null.
+  }
 
-    if (SyncFlags & 8) {
-       OrderAccess::fence();
-    }
-    return;
+  // We've acquired ownership with CAS().
+  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+  // But since the CAS() this thread may have also stored into _succ,
+  // EntryList, cxq or Responsible.  These meta-data updates must be
+  // visible __before this thread subsequently drops the lock.
+  // Consider what could occur if we didn't enforce this constraint --
+  // STs to monitor meta-data and user-data could reorder with (become
+  // visible after) the ST in exit that drops ownership of the lock.
+  // Some other thread could then acquire the lock, but observe inconsistent
+  // or old monitor meta-data and heap data.  That violates the JMM.
+  // To that end, the 1-0 exit() operation must have at least STST|LDST
+  // "release" barrier semantics.  Specifically, there must be at least a
+  // STST|LDST barrier in exit() before the ST of null into _owner that drops
+  // the lock.   The barrier ensures that changes to monitor meta-data and data
+  // protected by the lock will be visible before we release the lock, and
+  // therefore before some other thread (CPU) has a chance to acquire the lock.
+  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+  //
+  // Critically, any prior STs to _succ or EntryList must be visible before
+  // the ST of null into _owner in the *subsequent* (following) corresponding
+  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
+  // execute a serializing instruction.
+
+  if (SyncFlags & 8) {
+    OrderAccess::fence();
+  }
+  return;
 }
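
The barrier requirement spelled out above reduces to a release-ordered store of NULL into _owner on the 1-0 exit path. A minimal sketch (free-standing helper, not the VM's exit()):

static void sketch_1_0_exit(void* volatile* owner_addr) {
  // Prior stores to monitor meta-data and lock-protected heap data must
  // become visible before ownership is dropped, hence a release store
  // (STST|LDST) rather than a plain store of NULL.
  OrderAccess::release_store_ptr(owner_addr, (void*)NULL);
  // The 1-1 protocol mentioned above would additionally fence here and
  // re-examine cxq|EntryList to unpark a successor.
}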
 
 // ReenterI() is a specialized inline form of the latter half of the
@@ -700,160 +698,159 @@
 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 // loop accordingly.
 
-void NOINLINE ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
-    assert(Self != NULL                , "invariant");
-    assert(SelfNode != NULL            , "invariant");
-    assert(SelfNode->_thread == Self   , "invariant");
-    assert(_waiters > 0                , "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant");
-    assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
-    JavaThread * jt = (JavaThread *) Self;
+void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
+  assert(Self != NULL, "invariant");
+  assert(SelfNode != NULL, "invariant");
+  assert(SelfNode->_thread == Self, "invariant");
+  assert(_waiters > 0, "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
+  JavaThread * jt = (JavaThread *) Self;
 
-    int nWakeups = 0;
-    for (;;) {
-        ObjectWaiter::TStates v = SelfNode->TState;
-        guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-        assert(_owner != Self, "invariant");
-
-        if (TryLock(Self) > 0) break;
-        if (TrySpin(Self) > 0) break;
+  int nWakeups = 0;
+  for (;;) {
+    ObjectWaiter::TStates v = SelfNode->TState;
+    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+    assert(_owner != Self, "invariant");
 
-        TEVENT(Wait Reentry - parking);
+    if (TryLock(Self) > 0) break;
+    if (TrySpin(Self) > 0) break;
 
-        // State transition wrappers around park() ...
-        // ReenterI() wisely defers state transitions until
-        // it's clear we must park the thread.
-        {
-           OSThreadContendState osts(Self->osthread());
-           ThreadBlockInVM tbivm(jt);
+    TEVENT(Wait Reentry - parking);
 
-           // cleared by handle_special_suspend_equivalent_condition()
-           // or java_suspend_self()
-           jt->set_suspend_equivalent();
-           if (SyncFlags & 1) {
-              Self->_ParkEvent->park((jlong)1000);
-           } else {
-              Self->_ParkEvent->park();
-           }
-
-           // were we externally suspended while we were waiting?
-           for (;;) {
-              if (!ExitSuspendEquivalent(jt)) break;
-              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
-              jt->java_suspend_self();
-              jt->set_suspend_equivalent();
-           }
-        }
+    // State transition wrappers around park() ...
+    // ReenterI() wisely defers state transitions until
+    // it's clear we must park the thread.
+    {
+      OSThreadContendState osts(Self->osthread());
+      ThreadBlockInVM tbivm(jt);
 
-        // Try again, but just so we distinguish between futile wakeups and
-        // successful wakeups.  The following test isn't algorithmically
-        // necessary, but it helps us maintain sensible statistics.
-        if (TryLock(Self) > 0) break;
+      // cleared by handle_special_suspend_equivalent_condition()
+      // or java_suspend_self()
+      jt->set_suspend_equivalent();
+      if (SyncFlags & 1) {
+        Self->_ParkEvent->park((jlong)1000);
+      } else {
+        Self->_ParkEvent->park();
+      }
 
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Wait Reentry - futile wakeup);
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally
-        // find that _succ == Self.
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a contending thread
-        // *must* retry  _owner before parking.
-        OrderAccess::fence();
-
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-          ObjectMonitor::_sync_FutileWakeups->inc();
-        }
+      // were we externally suspended while we were waiting?
+      for (;;) {
+        if (!ExitSuspendEquivalent(jt)) break;
+        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+        jt->java_suspend_self();
+        jt->set_suspend_equivalent();
+      }
     }
 
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
-    // Normally we'll find Self on the EntryList.
-    // Unlinking from the EntryList is constant-time and atomic-free.
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    // Try again, but just so we distinguish between futile wakeups and
+    // successful wakeups.  The following test isn't algorithmically
+    // necessary, but it helps us maintain sensible statistics.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Wait Reentry - futile wakeup);
+    ++nWakeups;
+
+    // Assuming this is not a spurious wakeup we'll normally
+    // find that _succ == Self.
+    if (_succ == Self) _succ = NULL;
+
+    // Invariant: after clearing _succ a contending thread
+    // *must* retry  _owner before parking.
+    OrderAccess::fence();
 
-    assert(_owner == Self, "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-    UnlinkAfterAcquire(Self, SelfNode);
-    if (_succ == Self) _succ = NULL;
-    assert(_succ != Self, "invariant");
-    SelfNode->TState = ObjectWaiter::TS_RUN;
-    OrderAccess::fence();      // see comments at the end of EnterI()
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+  }
+
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+  // Normally we'll find Self on the EntryList.
+  // Unlinking from the EntryList is constant-time and atomic-free.
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
+
+  assert(_owner == Self, "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  UnlinkAfterAcquire(Self, SelfNode);
+  if (_succ == Self) _succ = NULL;
+  assert(_succ != Self, "invariant");
+  SelfNode->TState = ObjectWaiter::TS_RUN;
+  OrderAccess::fence();      // see comments at the end of EnterI()
 }
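
// The "lossy counter" trade-off noted above -- plain increments that may lose
// racing updates in exchange for a minimal probe effect -- can be sketched in
// a few standalone lines.  A minimal illustration, assuming std::atomic as a
// stand-in for HotSpot's PerfCounter machinery (all names here are made up):

#include <atomic>

// Racy by design: concurrent increments may be lost.  (Formally a data race
// in C++; shown only to illustrate the statistics-vs-probe-effect trade.)
static int lossy_futile_wakeups = 0;

// Exact alternative: an atomic RMW, at the cost of extra coherency traffic.
static std::atomic<int> exact_futile_wakeups{0};

static void record_futile_wakeup(bool want_exact) {
  if (want_exact) {
    exact_futile_wakeups.fetch_add(1, std::memory_order_relaxed);
  } else {
    ++lossy_futile_wakeups;   // plain load/add/store: no lock, no CAS
  }
}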
 
 // By convention we unlink a contending thread from EntryList|cxq immediately
 // after the thread acquires the lock in ::enter().  Equally, we could defer
 // unlinking the thread until ::exit()-time.
 
-void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
-{
-    assert(_owner == Self, "invariant");
-    assert(SelfNode->_thread == Self, "invariant");
+void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
+  assert(_owner == Self, "invariant");
+  assert(SelfNode->_thread == Self, "invariant");
 
-    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
-        // Normal case: remove Self from the DLL EntryList .
-        // This is a constant-time operation.
-        ObjectWaiter * nxt = SelfNode->_next;
-        ObjectWaiter * prv = SelfNode->_prev;
-        if (nxt != NULL) nxt->_prev = prv;
-        if (prv != NULL) prv->_next = nxt;
-        if (SelfNode == _EntryList) _EntryList = nxt;
-        assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
-        TEVENT(Unlink from EntryList);
-    } else {
-        assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
-        // Inopportune interleaving -- Self is still on the cxq.
-        // This usually means the enqueue of self raced an exiting thread.
-        // Normally we'll find Self near the front of the cxq, so
-        // dequeueing is typically fast.  If needbe we can accelerate
-        // this with some MCS/CHL-like bidirectional list hints and advisory
-        // back-links so dequeueing from the interior will normally operate
-        // in constant-time.
-        // Dequeue Self from either the head (with CAS) or from the interior
-        // with a linear-time scan and normal non-atomic memory operations.
-        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
-        // and then unlink Self from EntryList.  We have to drain eventually,
-        // so it might as well be now.
+  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+    // Normal case: remove Self from the DLL EntryList.
+    // This is a constant-time operation.
+    ObjectWaiter * nxt = SelfNode->_next;
+    ObjectWaiter * prv = SelfNode->_prev;
+    if (nxt != NULL) nxt->_prev = prv;
+    if (prv != NULL) prv->_next = nxt;
+    if (SelfNode == _EntryList) _EntryList = nxt;
+    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
+    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
+    TEVENT(Unlink from EntryList);
+  } else {
+    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
+    // Inopportune interleaving -- Self is still on the cxq.
+    // This usually means the enqueue of self raced an exiting thread.
+    // Normally we'll find Self near the front of the cxq, so
+    // dequeueing is typically fast.  If need be we can accelerate
+    // this with some MCS/CHL-like bidirectional list hints and advisory
+    // back-links so dequeueing from the interior will normally operate
+    // in constant-time.
+    // Dequeue Self from either the head (with CAS) or from the interior
+    // with a linear-time scan and normal non-atomic memory operations.
+    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+    // and then unlink Self from EntryList.  We have to drain eventually,
+    // so it might as well be now.
 
-        ObjectWaiter * v = _cxq;
-        assert(v != NULL, "invariant");
-        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
-            // The CAS above can fail from interference IFF a "RAT" arrived.
-            // In that case Self must be in the interior and can no longer be
-            // at the head of cxq.
-            if (v == SelfNode) {
-                assert(_cxq != v, "invariant");
-                v = _cxq;          // CAS above failed - start scan at head of list
-            }
-            ObjectWaiter * p;
-            ObjectWaiter * q = NULL;
-            for (p = v; p != NULL && p != SelfNode; p = p->_next) {
-                q = p;
-                assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
-            }
-            assert(v != SelfNode, "invariant");
-            assert(p == SelfNode, "Node not found on cxq");
-            assert(p != _cxq, "invariant");
-            assert(q != NULL, "invariant");
-            assert(q->_next == p, "invariant");
-            q->_next = p->_next;
-        }
-        TEVENT(Unlink from cxq);
+    ObjectWaiter * v = _cxq;
+    assert(v != NULL, "invariant");
+    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+      // The CAS above can fail from interference IFF a "RAT" arrived.
+      // In that case Self must be in the interior and can no longer be
+      // at the head of cxq.
+      if (v == SelfNode) {
+        assert(_cxq != v, "invariant");
+        v = _cxq;          // CAS above failed - start scan at head of list
+      }
+      ObjectWaiter * p;
+      ObjectWaiter * q = NULL;
+      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
+        q = p;
+        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
+      }
+      assert(v != SelfNode, "invariant");
+      assert(p == SelfNode, "Node not found on cxq");
+      assert(p != _cxq, "invariant");
+      assert(q != NULL, "invariant");
+      assert(q->_next == p, "invariant");
+      q->_next = p->_next;
     }
+    TEVENT(Unlink from cxq);
+  }
 
 #ifdef ASSERT
-    // Diagnostic hygiene ...
-    SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
-    SelfNode->_next  = (ObjectWaiter *) 0xBAD;
-    SelfNode->TState = ObjectWaiter::TS_RUN;
+  // Diagnostic hygiene ...
+  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
+  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
+  SelfNode->TState = ObjectWaiter::TS_RUN;
 #endif
 }
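
// The cxq dequeue above follows a common pattern for a Treiber-style LIFO
// list: one CAS pops the head, and otherwise a linear scan with plain stores
// unlinks an interior node -- safe only because a single thread (the lock
// owner) ever unlinks.  A minimal standalone sketch under those assumptions,
// with std::atomic standing in for Atomic::cmpxchg_ptr and an illustrative
// Node type:

#include <atomic>

struct Node { Node* next; };

static void unlink_node(std::atomic<Node*>& head, Node* self) {
  Node* v = head.load();
  if (v == self && head.compare_exchange_strong(v, self->next)) {
    return;                  // fast path: popped the head with a single CAS
  }
  // The CAS can only fail if a newly arrived node displaced us from the
  // head, so self must now be interior: scan from the current head.
  Node* q = nullptr;
  for (Node* p = head.load(); p != nullptr && p != self; p = p->next) {
    q = p;
  }
  if (q != nullptr) {
    q->next = self->next;    // plain store: we are the only unlinker
  }
}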
 
@@ -915,331 +912,332 @@
 // of such futile wakeups is low.
 
 void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
-   Thread * const Self = THREAD;
-   if (THREAD != _owner) {
-     if (THREAD->is_lock_owned((address) _owner)) {
-       // Transmute _owner from a BasicLock pointer to a Thread address.
-       // We don't need to hold _mutex for this transition.
-       // Non-null to Non-null is safe as long as all readers can
-       // tolerate either flavor.
-       assert(_recursions == 0, "invariant");
-       _owner = THREAD;
-       _recursions = 0;
-       OwnerIsThread = 1;
-     } else {
-       // Apparent unbalanced locking ...
-       // Naively we'd like to throw IllegalMonitorStateException.
-       // As a practical matter we can neither allocate nor throw an
-       // exception as ::exit() can be called from leaf routines.
-       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
-       // Upon deeper reflection, however, in a properly run JVM the only
-       // way we should encounter this situation is in the presence of
-       // unbalanced JNI locking. TODO: CheckJNICalls.
-       // See also: CR4414101
-       TEVENT(Exit - Throw IMSX);
-       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
-       return;
-     }
-   }
+  Thread * const Self = THREAD;
+  if (THREAD != _owner) {
+    if (THREAD->is_lock_owned((address) _owner)) {
+      // Transmute _owner from a BasicLock pointer to a Thread address.
+      // We don't need to hold _mutex for this transition.
+      // Non-null to Non-null is safe as long as all readers can
+      // tolerate either flavor.
+      assert(_recursions == 0, "invariant");
+      _owner = THREAD;
+      _recursions = 0;
+      OwnerIsThread = 1;
+    } else {
+      // Apparent unbalanced locking ...
+      // Naively we'd like to throw IllegalMonitorStateException.
+      // As a practical matter we can neither allocate nor throw an
+      // exception as ::exit() can be called from leaf routines.
+      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+      // Upon deeper reflection, however, in a properly run JVM the only
+      // way we should encounter this situation is in the presence of
+      // unbalanced JNI locking. TODO: CheckJNICalls.
+      // See also: CR4414101
+      TEVENT(Exit - Throw IMSX);
+      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
+      return;
+    }
+  }
 
-   if (_recursions != 0) {
-     _recursions--;        // this is simple recursive enter
-     TEVENT(Inflated exit - recursive);
-     return;
-   }
+  if (_recursions != 0) {
+    _recursions--;        // this is simple recursive enter
+    TEVENT(Inflated exit - recursive);
+    return;
+  }
 
-   // Invariant: after setting Responsible=null an thread must execute
-   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
+  // Invariant: after setting Responsible=null a thread must execute
+  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
 
 #if INCLUDE_TRACE
-   // get the owner's thread id for the MonitorEnter event
-   // if it is enabled and the thread isn't suspended
-   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
-     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
-   }
+  // get the owner's thread id for the MonitorEnter event
+  // if it is enabled and the thread isn't suspended
+  if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
+    _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+  }
 #endif
 
-   for (;;) {
-      assert(THREAD == _owner, "invariant");
+  for (;;) {
+    assert(THREAD == _owner, "invariant");
 
 
-      if (Knob_ExitPolicy == 0) {
-         // release semantics: prior loads and stores from within the critical section
-         // must not float (reorder) past the following store that drops the lock.
-         // On SPARC that requires MEMBAR #loadstore|#storestore.
-         // But of course in TSO #loadstore|#storestore is not required.
-         // I'd like to write one of the following:
-         // A.  OrderAccess::release() ; _owner = NULL
-         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
-         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
-         // store into a _dummy variable.  That store is not needed, but can result
-         // in massive wasteful coherency traffic on classic SMP systems.
-         // Instead, I use release_store(), which is implemented as just a simple
-         // ST on x64, x86 and SPARC.
-         OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-         OrderAccess::storeload();                         // See if we need to wake a successor
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            TEVENT(Inflated exit - simple egress);
-            return;
-         }
-         TEVENT(Inflated exit - complex egress);
-         // Other threads are blocked trying to acquire the lock.
+    if (Knob_ExitPolicy == 0) {
+      // release semantics: prior loads and stores from within the critical section
+      // must not float (reorder) past the following store that drops the lock.
+      // On SPARC that requires MEMBAR #loadstore|#storestore.
+      // But of course in TSO #loadstore|#storestore is not required.
+      // I'd like to write one of the following:
+      // A.  OrderAccess::release() ; _owner = NULL
+      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+      // store into a _dummy variable.  That store is not needed, but can result
+      // in massive wasteful coherency traffic on classic SMP systems.
+      // Instead, I use release_store(), which is implemented as just a simple
+      // ST on x64, x86 and SPARC.
+      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+      OrderAccess::storeload();                        // See if we need to wake a successor
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        TEVENT(Inflated exit - simple egress);
+        return;
+      }
+      TEVENT(Inflated exit - complex egress);
+      // Other threads are blocked trying to acquire the lock.
+
+      // Normally the exiting thread is responsible for ensuring succession,
+      // but if other successors are ready or other entering threads are spinning
+      // then this thread can simply store NULL into _owner and exit without
+      // waking a successor.  The existence of spinners or ready successors
+      // guarantees proper succession (liveness).  Responsibility passes to the
+      // ready or running successors.  The exiting thread delegates the duty.
+      // More precisely, if a successor already exists this thread is absolved
+      // of the responsibility of waking (unparking) one.
+      //
+      // The _succ variable is critical to reducing futile wakeup frequency.
+      // _succ identifies the "heir presumptive" thread that has been made
+      // ready (unparked) but that has not yet run.  We need only one such
+      // successor thread to guarantee progress.
+      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+      // section 3.3 "Futile Wakeup Throttling" for details.
+      //
+      // Note that spinners in Enter() also set _succ non-null.
+      // In the current implementation spinners opportunistically set
+      // _succ so that exiting threads might avoid waking a successor.
+      // Another less appealing alternative would be for the exiting thread
+      // to drop the lock and then spin briefly to see if a spinner managed
+      // to acquire the lock.  If so, the exiting thread could exit
+      // immediately without waking a successor, otherwise the exiting
+      // thread would need to dequeue and wake a successor.
+      // (Note that we'd need to make the post-drop spin short, but no
+      // shorter than the worst-case round-trip cache-line migration time.
+      // The dropped lock needs to become visible to the spinner, and then
+      // the acquisition of the lock by the spinner must become visible to
+      // the exiting thread).
 
-         // Normally the exiting thread is responsible for ensuring succession,
-         // but if other successors are ready or other entering threads are spinning
-         // then this thread can simply store NULL into _owner and exit without
-         // waking a successor.  The existence of spinners or ready successors
-         // guarantees proper succession (liveness).  Responsibility passes to the
-         // ready or running successors.  The exiting thread delegates the duty.
-         // More precisely, if a successor already exists this thread is absolved
-         // of the responsibility of waking (unparking) one.
-         //
-         // The _succ variable is critical to reducing futile wakeup frequency.
-         // _succ identifies the "heir presumptive" thread that has been made
-         // ready (unparked) but that has not yet run.  We need only one such
-         // successor thread to guarantee progress.
-         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
-         // section 3.3 "Futile Wakeup Throttling" for details.
-         //
-         // Note that spinners in Enter() also set _succ non-null.
-         // In the current implementation spinners opportunistically set
-         // _succ so that exiting threads might avoid waking a successor.
-         // Another less appealing alternative would be for the exiting thread
-         // to drop the lock and then spin briefly to see if a spinner managed
-         // to acquire the lock.  If so, the exiting thread could exit
-         // immediately without waking a successor, otherwise the exiting
-         // thread would need to dequeue and wake a successor.
-         // (Note that we'd need to make the post-drop spin short, but no
-         // shorter than the worst-case round-trip cache-line migration time.
-         // The dropped lock needs to become visible to the spinner, and then
-         // the acquisition of the lock by the spinner must become visible to
-         // the exiting thread).
-         //
+      // It appears that an heir-presumptive (successor) must be made ready.
+      // Only the current lock owner can manipulate the EntryList or
+      // drain _cxq, so we need to reacquire the lock.  If we fail
+      // to reacquire the lock the responsibility for ensuring succession
+      // falls to the new owner.
+      //
+      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+        return;
+      }
+      TEVENT(Exit - Reacquired);
+    } else {
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+        OrderAccess::storeload();
+        // Ratify the previously observed values.
+        if (_cxq == NULL || _succ != NULL) {
+          TEVENT(Inflated exit - simple egress);
+          return;
+        }
 
-         // It appears that an heir-presumptive (successor) must be made ready.
-         // Only the current lock owner can manipulate the EntryList or
-         // drain _cxq, so we need to reacquire the lock.  If we fail
-         // to reacquire the lock the responsibility for ensuring succession
-         // falls to the new owner.
-         //
-         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-            return;
-         }
-         TEVENT(Exit - Reacquired);
+        // inopportune interleaving -- the exiting thread (this thread)
+        // in the fast-exit path raced an entering thread in the slow-enter
+        // path.
+        // We have two choices:
+        // A.  Try to reacquire the lock.
+        //     If the CAS() fails return immediately, otherwise
+        //     we either restart/rerun the exit operation, or simply
+        //     fall-through into the code below which wakes a successor.
+        // B.  If the elements forming the EntryList|cxq are TSM
+        //     we could simply unpark() the lead thread and return
+        //     without having set _succ.
+        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+          TEVENT(Inflated exit - reacquired succeeded);
+          return;
+        }
+        TEVENT(Inflated exit - reacquired failed);
       } else {
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-            OrderAccess::storeload();
-            // Ratify the previously observed values.
-            if (_cxq == NULL || _succ != NULL) {
-                TEVENT(Inflated exit - simple egress);
-                return;
-            }
+        TEVENT(Inflated exit - complex egress);
+      }
+    }
+
+    guarantee(_owner == THREAD, "invariant");
+
+    ObjectWaiter * w = NULL;
+    int QMode = Knob_QMode;
 
-            // inopportune interleaving -- the exiting thread (this thread)
-            // in the fast-exit path raced an entering thread in the slow-enter
-            // path.
-            // We have two choices:
-            // A.  Try to reacquire the lock.
-            //     If the CAS() fails return immediately, otherwise
-            //     we either restart/rerun the exit operation, or simply
-            //     fall-through into the code below which wakes a successor.
-            // B.  If the elements forming the EntryList|cxq are TSM
-            //     we could simply unpark() the lead thread and return
-            //     without having set _succ.
-            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-               TEVENT(Inflated exit - reacquired succeeded);
-               return;
-            }
-            TEVENT(Inflated exit - reacquired failed);
-         } else {
-            TEVENT(Inflated exit - complex egress);
-         }
+    if (QMode == 2 && _cxq != NULL) {
+      // QMode == 2 : cxq has precedence over EntryList.
+      // Try to directly wake a successor from the cxq.
+      // If successful, the successor will need to unlink itself from cxq.
+      w = _cxq;
+      assert(w != NULL, "invariant");
+      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    if (QMode == 3 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
+      // Drain _cxq into EntryList - bulk transfer.
+      // First, detach _cxq.
+      // The following loop is tantamount to: w = swap(&cxq, NULL)
+      w = _cxq;
+      for (;;) {
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
+      }
+      assert(w != NULL, "invariant");
+
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      guarantee(_owner == THREAD, "invariant");
-
-      ObjectWaiter * w = NULL;
-      int QMode = Knob_QMode;
-
-      if (QMode == 2 && _cxq != NULL) {
-          // QMode == 2 : cxq has precedence over EntryList.
-          // Try to directly wake a successor from the cxq.
-          // If successful, the successor will need to unlink itself from cxq.
-          w = _cxq;
-          assert(w != NULL, "invariant");
-          assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      if (QMode == 3 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
-
-          // Append the RATs to the EntryList
-          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
-          ObjectWaiter * Tail;
-          for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL; Tail = Tail->_next);
-          if (Tail == NULL) {
-              _EntryList = w;
-          } else {
-              Tail->_next = w;
-              w->_prev = Tail;
-          }
-
-          // Fall thru into code that tries to wake a successor from EntryList
+      // Append the RATs to the EntryList
+      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+      ObjectWaiter * Tail;
+      for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL;
+           Tail = Tail->_next)
+        /* empty */;
+      if (Tail == NULL) {
+        _EntryList = w;
+      } else {
+        Tail->_next = w;
+        w->_prev = Tail;
       }
 
-      if (QMode == 4 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-          // Prepend the RATs to the EntryList
-          if (_EntryList != NULL) {
-              q->_next = _EntryList;
-              _EntryList->_prev = q;
-          }
-          _EntryList = w;
-
-          // Fall thru into code that tries to wake a successor from EntryList
-      }
-
-      w = _EntryList;
-      if (w != NULL) {
-          // I'd like to write: guarantee (w->_thread != Self).
-          // But in practice an exiting thread may find itself on the EntryList.
-          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
-          // then calls exit().  Exit release the lock by setting O._owner to NULL.
-          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
-          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
-          // release the lock "O".  T2 resumes immediately after the ST of null into
-          // _owner, above.  T2 notices that the EntryList is populated, so it
-          // reacquires the lock and then finds itself on the EntryList.
-          // Given all that, we have to tolerate the circumstance where "w" is
-          // associated with Self.
-          assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      // If we find that both _cxq and EntryList are null then just
-      // re-run the exit protocol from the top.
-      w = _cxq;
-      if (w == NULL) continue;
+    if (QMode == 4 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
 
       // Drain _cxq into EntryList - bulk transfer.
       // First, detach _cxq.
-      // The following loop is tantamount to: w = swap (&cxq, NULL)
+      // The following loop is tantamount to: w = swap(&cxq, NULL)
+      w = _cxq;
       for (;;) {
-          assert(w != NULL, "Invariant");
-          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-          if (u == w) break;
-          w = u;
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
       }
-      TEVENT(Inflated exit - drain cxq into EntryList);
-
-      assert(w != NULL              , "invariant");
-      assert(_EntryList  == NULL    , "invariant");
-
-      // Convert the LIFO SLL anchored by _cxq into a DLL.
-      // The list reorganization step operates in O(LENGTH(w)) time.
-      // It's critical that this step operate quickly as
-      // "Self" still holds the outer-lock, restricting parallelism
-      // and effectively lengthening the critical section.
-      // Invariant: s chases t chases u.
-      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
-      // we have faster access to the tail.
+      assert(w != NULL, "invariant");
 
-      if (QMode == 1) {
-         // QMode == 1 : drain cxq to EntryList, reversing order
-         // We also reverse the order of the list.
-         ObjectWaiter * s = NULL;
-         ObjectWaiter * t = w;
-         ObjectWaiter * u = NULL;
-         while (t != NULL) {
-             guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
-             t->TState = ObjectWaiter::TS_ENTER;
-             u = t->_next;
-             t->_prev = u;
-             t->_next = s;
-             s = t;
-             t = u;
-         }
-         _EntryList  = s;
-         assert(s != NULL, "invariant");
-      } else {
-         // QMode == 0 or QMode == 2
-         _EntryList = w;
-         ObjectWaiter * q = NULL;
-         ObjectWaiter * p;
-         for (p = w; p != NULL; p = p->_next) {
-             guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-             p->TState = ObjectWaiter::TS_ENTER;
-             p->_prev = q;
-             q = p;
-         }
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
-      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+      // Prepend the RATs to the EntryList
+      if (_EntryList != NULL) {
+        q->_next = _EntryList;
+        _EntryList->_prev = q;
+      }
+      _EntryList = w;
+
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-      // See if we can abdicate to a spinner instead of waking a thread.
-      // A primary goal of the implementation is to reduce the
-      // context-switch rate.
-      if (_succ != NULL) continue;
+    w = _EntryList;
+    if (w != NULL) {
+      // I'd like to write: guarantee (w->_thread != Self).
+      // But in practice an exiting thread may find itself on the EntryList.
+      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
+      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
+      // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
+      // releases the lock "O".  T2 resumes immediately after the ST of null into
+      // _owner, above.  T2 notices that the EntryList is populated, so it
+      // reacquires the lock and then finds itself on the EntryList.
+      // Given all that, we have to tolerate the circumstance where "w" is
+      // associated with Self.
+      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    // If we find that both _cxq and EntryList are null then just
+    // re-run the exit protocol from the top.
+    w = _cxq;
+    if (w == NULL) continue;
+
+    // Drain _cxq into EntryList - bulk transfer.
+    // First, detach _cxq.
+    // The following loop is tantamount to: w = swap(&cxq, NULL)
+    for (;;) {
+      assert(w != NULL, "Invariant");
+      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+      if (u == w) break;
+      w = u;
+    }
+    TEVENT(Inflated exit - drain cxq into EntryList);
+
+    assert(w != NULL, "invariant");
+    assert(_EntryList == NULL, "invariant");
 
-      w = _EntryList;
-      if (w != NULL) {
-          guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
+    // Convert the LIFO SLL anchored by _cxq into a DLL.
+    // The list reorganization step operates in O(LENGTH(w)) time.
+    // It's critical that this step operate quickly as
+    // "Self" still holds the outer-lock, restricting parallelism
+    // and effectively lengthening the critical section.
+    // Invariant: s chases t chases u.
+    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+    // we have faster access to the tail.
+
+    if (QMode == 1) {
+      // QMode == 1 : drain cxq to EntryList, reversing order
+      // We also reverse the order of the list.
+      ObjectWaiter * s = NULL;
+      ObjectWaiter * t = w;
+      ObjectWaiter * u = NULL;
+      while (t != NULL) {
+        guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
+        t->TState = ObjectWaiter::TS_ENTER;
+        u = t->_next;
+        t->_prev = u;
+        t->_next = s;
+        s = t;
+        t = u;
       }
-   }
+      _EntryList  = s;
+      assert(s != NULL, "invariant");
+    } else {
+      // QMode == 0 or QMode == 2
+      _EntryList = w;
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
+      }
+    }
+
+    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+    // See if we can abdicate to a spinner instead of waking a thread.
+    // A primary goal of the implementation is to reduce the
+    // context-switch rate.
+    if (_succ != NULL) continue;
+
+    w = _EntryList;
+    if (w != NULL) {
+      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+  }
 }
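
// The Knob_ExitPolicy == 0 path above is the classic "1-0" unlock: drop the
// lock with a release store, issue a #storeload barrier, and only then decide
// whether a successor must be woken, reacquiring via CAS if so.  A minimal
// standalone sketch of that shape -- std::atomic and wake_one() are stand-ins
// for OrderAccess and ParkEvent, not HotSpot's actual API:

#include <atomic>

struct Thread;
static std::atomic<Thread*> owner_q{nullptr};   // nullptr == unlocked
static std::atomic<Thread*> entry_q{nullptr};   // head of the waiting list
static void wake_one(Thread*) { /* unpark a successor; stubbed here */ }

static void unlock_1_0(Thread* self) {
  owner_q.store(nullptr, std::memory_order_release);    // drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ST owner vs LD queue
  Thread* w = entry_q.load(std::memory_order_acquire);
  if (w == nullptr) return;        // simple egress: nobody needs waking
  Thread* expected = nullptr;
  if (!owner_q.compare_exchange_strong(expected, self)) {
    return;    // lost the race: the new owner inherits the succession duty
  }
  // Reacquired the lock: drop it again, then wake one waiter (cf. ExitEpilog).
  owner_q.store(nullptr, std::memory_order_release);
  wake_one(w);
}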
 
 // ExitSuspendEquivalent:
@@ -1277,53 +1275,53 @@
 // decreased. - Dave
 
 
-bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-   const int Mode = Knob_FastHSSEC;
-   if (Mode && !jSelf->is_external_suspend()) {
-      assert(jSelf->is_suspend_equivalent(), "invariant");
-      jSelf->clear_suspend_equivalent();
-      if (2 == Mode) OrderAccess::storeload();
-      if (!jSelf->is_external_suspend()) return false;
-      // We raced a suspension -- fall thru into the slow path
-      TEVENT(ExitSuspendEquivalent - raced);
-      jSelf->set_suspend_equivalent();
-   }
-   return jSelf->handle_special_suspend_equivalent_condition();
+bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
+  const int Mode = Knob_FastHSSEC;
+  if (Mode && !jSelf->is_external_suspend()) {
+    assert(jSelf->is_suspend_equivalent(), "invariant");
+    jSelf->clear_suspend_equivalent();
+    if (2 == Mode) OrderAccess::storeload();
+    if (!jSelf->is_external_suspend()) return false;
+    // We raced a suspension -- fall thru into the slow path
+    TEVENT(ExitSuspendEquivalent - raced);
+    jSelf->set_suspend_equivalent();
+  }
+  return jSelf->handle_special_suspend_equivalent_condition();
 }
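
// The Knob_FastHSSEC fast path is a Dekker-style flag handshake: store our
// flag, execute a #storeload, then load the other side's flag.  A standalone
// sketch of just that ordering (the flag names are illustrative, not
// HotSpot's):

#include <atomic>

static std::atomic<bool> suspend_equivalent_{true};  // written by this thread
static std::atomic<bool> external_suspend_{false};   // written by the suspender

// Returns true if we raced a suspension and must take the slow path.
static bool fast_suspend_recheck() {
  suspend_equivalent_.store(false);                     // ST our flag ...
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ... #storeload ...
  return external_suspend_.load();                      // ... LD their flag
}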
 
 
-void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
-   assert(_owner == Self, "invariant");
+void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
+  assert(_owner == Self, "invariant");
 
-   // Exit protocol:
-   // 1. ST _succ = wakee
-   // 2. membar #loadstore|#storestore;
-   // 2. ST _owner = NULL
-   // 3. unpark(wakee)
+  // Exit protocol:
+  // 1. ST _succ = wakee
+  // 2. membar #loadstore|#storestore;
+  // 2. ST _owner = NULL
+  // 3. unpark(wakee)
 
-   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
-   ParkEvent * Trigger = Wakee->_event;
+  _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
+  ParkEvent * Trigger = Wakee->_event;
 
-   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
-   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
-   // out-of-scope (non-extant).
-   Wakee  = NULL;
+  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+  // out-of-scope (non-extant).
+  Wakee  = NULL;
 
-   // Drop the lock
-   OrderAccess::release_store_ptr(&_owner, NULL);
-   OrderAccess::fence();                               // ST _owner vs LD in unpark()
+  // Drop the lock
+  OrderAccess::release_store_ptr(&_owner, NULL);
+  OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
-   if (SafepointSynchronize::do_call_back()) {
-      TEVENT(unpark before SAFEPOINT);
-   }
+  if (SafepointSynchronize::do_call_back()) {
+    TEVENT(unpark before SAFEPOINT);
+  }
 
-   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
-   Trigger->unpark();
+  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+  Trigger->unpark();
 
-   // Maintain stats and report events to JVMTI
-   if (ObjectMonitor::_sync_Parks != NULL) {
-      ObjectMonitor::_sync_Parks->inc();
-   }
+  // Maintain stats and report events to JVMTI
+  if (ObjectMonitor::_sync_Parks != NULL) {
+    ObjectMonitor::_sync_Parks->inc();
+  }
 }
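
// The exit protocol above (ST _succ; drop the lock with a release store;
// fence; unpark) can be sketched in isolation.  Note the hygiene rule: the
// trigger is captured before the lock is dropped, because the wakee may be
// gone immediately afterwards.  std::atomic and the stub types below are
// assumptions for illustration, not HotSpot's types:

#include <atomic>

struct ParkEvent2 { void unpark() { /* wake the parked thread; stubbed */ } };
struct Waiter2    { ParkEvent2* event; };

static std::atomic<Waiter2*> succ_s{nullptr};
static std::atomic<void*>    owner_s{nullptr};

static void exit_epilog(Waiter2* wakee) {
  succ_s.store(wakee);                        // 1. name the heir presumptive
  ParkEvent2* trigger = wakee->event;         //    capture before dropping
  wakee = nullptr;                            //    the lock (hygiene)
  owner_s.store(nullptr, std::memory_order_release);    // 2. drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);  //    ST owner vs LD in unpark()
  trigger->unpark();                          // 3. wake the successor
}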
 
 
@@ -1337,41 +1335,41 @@
 // inflated monitor, e.g. the monitor can be inflated by a non-owning
 // thread due to contention.
 intptr_t ObjectMonitor::complete_exit(TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   if (THREAD != _owner) {
+  if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;   /* Convert from basiclock addr to Thread addr */
-       _recursions = 0;
-       OwnerIsThread = 1;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;   // Convert from basiclock addr to Thread addr
+      _recursions = 0;
+      OwnerIsThread = 1;
     }
-   }
+  }
 
-   guarantee(Self == _owner, "complete_exit not owner");
-   intptr_t save = _recursions; // record the old recursion count
-   _recursions = 0;        // set the recursion level to be 0
-   exit(true, Self);           // exit the monitor
-   guarantee(_owner != Self, "invariant");
-   return save;
+  guarantee(Self == _owner, "complete_exit not owner");
+  intptr_t save = _recursions; // record the old recursion count
+  _recursions = 0;        // set the recursion level to be 0
+  exit(true, Self);           // exit the monitor
+  guarantee(_owner != Self, "invariant");
+  return save;
 }
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   guarantee(_owner != Self, "reenter already owner");
-   enter(THREAD);       // enter the monitor
-   guarantee(_recursions == 0, "reenter recursion");
-   _recursions = recursions;
-   return;
+  guarantee(_owner != Self, "reenter already owner");
+  enter(THREAD);       // enter the monitor
+  guarantee(_recursions == 0, "reenter recursion");
+  _recursions = recursions;
+  return;
 }
 
 
@@ -1381,18 +1379,18 @@
 // which use this (which is why we don't put this into check_slow and
 // call it with a CHECK argument).
 
-#define CHECK_OWNER()                                                             \
-  do {                                                                            \
-    if (THREAD != _owner) {                                                       \
-      if (THREAD->is_lock_owned((address) _owner)) {                              \
-        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */       \
-        _recursions = 0;                                                          \
-        OwnerIsThread = 1;                                                       \
-      } else {                                                                    \
-        TEVENT(Throw IMSX);                                                     \
-        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
-      }                                                                           \
-    }                                                                             \
+#define CHECK_OWNER()                                                       \
+  do {                                                                      \
+    if (THREAD != _owner) {                                                 \
+      if (THREAD->is_lock_owned((address) _owner)) {                        \
+        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
+        _recursions = 0;                                                    \
+        OwnerIsThread = 1;                                                  \
+      } else {                                                              \
+        TEVENT(Throw IMSX);                                                 \
+        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
+      }                                                                     \
+    }                                                                       \
   } while (false)
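
// The do { ... } while (false) wrapper is the standard idiom for making a
// multi-statement macro expand to exactly one statement, so that it composes
// safely with if/else and requires a trailing semicolon.  A minimal example
// (LOG_PAIR is made up for illustration):

#include <cstdio>

#define LOG_PAIR(a, b)      \
  do {                      \
    std::printf("%s ", a);  \
    std::printf("%s\n", b); \
  } while (false)

static void demo(bool verbose) {
  if (verbose)
    LOG_PAIR("hello", "world");   // expands to exactly one statement
  else
    std::printf("quiet\n");
}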
 
 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
@@ -1404,17 +1402,17 @@
   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
 }
 
-static int Adjust (volatile int * adr, int dx) {
+static int Adjust(volatile int * adr, int dx) {
   int v;
-  for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr);
+  for (v = *adr; Atomic::cmpxchg(v + dx, adr, v) != v; v = *adr) /* empty */;
   return v;
 }
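
// Adjust() is a plain CAS retry loop implementing fetch-and-add: re-read the
// current value until the compare-and-swap installs v + dx, then return the
// value observed before the update.  The same shape with std::atomic (an
// assumption; HotSpot's Atomic::cmpxchg takes (new, addr, expected) and
// returns the prior value):

#include <atomic>

static int adjust(std::atomic<int>& adr, int dx) {
  int v = adr.load();
  // On failure compare_exchange_weak reloads v with the current value.
  while (!adr.compare_exchange_weak(v, v + dx)) { /* retry */ }
  return v;   // value before the adjustment, matching Adjust() above
}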
 
 // helper method for posting a monitor wait event
 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
-                                                           jlong notifier_tid,
-                                                           jlong timeout,
-                                                           bool timedout) {
+                                            jlong notifier_tid,
+                                            jlong timeout,
+                                            bool timedout) {
   event->set_klass(((oop)this->object())->klass());
   event->set_timeout((TYPE_ULONG)timeout);
   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
@@ -1429,232 +1427,230 @@
 // Note: a subset of changes to ObjectMonitor::wait()
 // will need to be replicated in complete_exit
 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   // Throw IMSX or IEX.
-   CHECK_OWNER();
+  // Throw IMSX or IEX.
+  CHECK_OWNER();
 
-   EventJavaMonitorWait event;
+  EventJavaMonitorWait event;
 
-   // check for a pending interrupt
-   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-     // post monitor waited event.  Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-        // Note: 'false' parameter is passed here because the
-        // wait was not timed out due to thread interrupt.
-        JvmtiExport::post_monitor_waited(jt, this, false);
+  // check for a pending interrupt
+  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+    // post monitor waited event.  Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      // Note: 'false' parameter is passed here because the
+      // wait was not timed out due to thread interrupt.
+      JvmtiExport::post_monitor_waited(jt, this, false);
 
-        // In this short circuit of the monitor wait protocol, the
-        // current thread never drops ownership of the monitor and
-        // never gets added to the wait queue so the current thread
-        // cannot be made the successor. This means that the
-        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
-        // consume an unpark() meant for the ParkEvent associated with
-        // this ObjectMonitor.
-     }
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, 0, millis, false);
-     }
-     TEVENT(Wait - Throw IEX);
-     THROW(vmSymbols::java_lang_InterruptedException());
-     return;
-   }
+      // In this short circuit of the monitor wait protocol, the
+      // current thread never drops ownership of the monitor and
+      // never gets added to the wait queue so the current thread
+      // cannot be made the successor. This means that the
+      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
+      // consume an unpark() meant for the ParkEvent associated with
+      // this ObjectMonitor.
+    }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, 0, millis, false);
+    }
+    TEVENT(Wait - Throw IEX);
+    THROW(vmSymbols::java_lang_InterruptedException());
+    return;
+  }
 
-   TEVENT(Wait);
+  TEVENT(Wait);
 
-   assert(Self->_Stalled == 0, "invariant");
-   Self->_Stalled = intptr_t(this);
-   jt->set_current_waiting_monitor(this);
+  assert(Self->_Stalled == 0, "invariant");
+  Self->_Stalled = intptr_t(this);
+  jt->set_current_waiting_monitor(this);
 
-   // create a node to be put into the queue
-   // Critically, after we reset() the event but prior to park(), we must check
-   // for a pending interrupt.
-   ObjectWaiter node(Self);
-   node.TState = ObjectWaiter::TS_WAIT;
-   Self->_ParkEvent->reset();
-   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
+  // create a node to be put into the queue
+  // Critically, after we reset() the event but prior to park(), we must check
+  // for a pending interrupt.
+  ObjectWaiter node(Self);
+  node.TState = ObjectWaiter::TS_WAIT;
+  Self->_ParkEvent->reset();
+  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
 
-   // Enter the waiting queue, which is a circular doubly linked list in this case
-   // but it could be a priority queue or any data structure.
-   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
-   // by the the owner of the monitor *except* in the case where park()
-   // returns because of a timeout of interrupt.  Contention is exceptionally rare
-   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+  // Enter the waiting queue, which is a circular doubly linked list in this case
+  // but it could be a priority queue or any data structure.
+  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+  // by the owner of the monitor *except* in the case where park()
+  // returns because of a timeout or interrupt.  Contention is exceptionally rare
+  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
 
-   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
-   AddWaiter(&node);
-   Thread::SpinRelease(&_WaitSetLock);
+  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
+  AddWaiter(&node);
+  Thread::SpinRelease(&_WaitSetLock);
 
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
-   intptr_t save = _recursions; // record the old recursion count
-   _waiters++;                  // increment the number of waiters
-   _recursions = 0;             // set the recursion level to be 1
-   exit(true, Self);                    // exit the monitor
-   guarantee(_owner != Self, "invariant");
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
+  intptr_t save = _recursions; // record the old recursion count
+  _waiters++;                  // increment the number of waiters
+  _recursions = 0;             // set the recursion level to be 0
+  exit(true, Self);                    // exit the monitor
+  guarantee(_owner != Self, "invariant");
 
-   // The thread is on the WaitSet list - now park() it.
-   // On MP systems it's conceivable that a brief spin before we park
-   // could be profitable.
-   //
-   // TODO-FIXME: change the following logic to a loop of the form
-   //   while (!timeout && !interrupted && _notified == 0) park()
+  // The thread is on the WaitSet list - now park() it.
+  // On MP systems it's conceivable that a brief spin before we park
+  // could be profitable.
+  //
+  // TODO-FIXME: change the following logic to a loop of the form
+  //   while (!timeout && !interrupted && _notified == 0) park()
 
-   int ret = OS_OK;
-   int WasNotified = 0;
-   { // State transition wrappers
-     OSThread* osthread = Self->osthread();
-     OSThreadWaitState osts(osthread, true);
-     {
-       ThreadBlockInVM tbivm(jt);
-       // Thread is in thread_blocked state and oop access is unsafe.
-       jt->set_suspend_equivalent();
+  int ret = OS_OK;
+  int WasNotified = 0;
+  { // State transition wrappers
+    OSThread* osthread = Self->osthread();
+    OSThreadWaitState osts(osthread, true);
+    {
+      ThreadBlockInVM tbivm(jt);
+      // Thread is in thread_blocked state and oop access is unsafe.
+      jt->set_suspend_equivalent();
 
-       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
-           // Intentionally empty
-       } else
-       if (node._notified == 0) {
-         if (millis <= 0) {
-            Self->_ParkEvent->park();
-         } else {
-            ret = Self->_ParkEvent->park(millis);
-         }
-       }
+      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+        // Intentionally empty
+      } else if (node._notified == 0) {
+        if (millis <= 0) {
+          Self->_ParkEvent->park();
+        } else {
+          ret = Self->_ParkEvent->park(millis);
+        }
+      }
 
-       // were we externally suspended while we were waiting?
-       if (ExitSuspendEquivalent (jt)) {
-          // TODO-FIXME: add -- if succ == Self then succ = null.
-          jt->java_suspend_self();
-       }
+      // were we externally suspended while we were waiting?
+      if (ExitSuspendEquivalent (jt)) {
+        // TODO-FIXME: add -- if succ == Self then succ = null.
+        jt->java_suspend_self();
+      }
 
-     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
-
+    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
 
-     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
-     // from the WaitSet to the EntryList.
-     // See if we need to remove Node from the WaitSet.
-     // We use double-checked locking to avoid grabbing _WaitSetLock
-     // if the thread is not on the wait queue.
-     //
-     // Note that we don't need a fence before the fetch of TState.
-     // In the worst case we'll fetch a old-stale value of TS_WAIT previously
-     // written by the is thread. (perhaps the fetch might even be satisfied
-     // by a look-aside into the processor's own store buffer, although given
-     // the length of the code path between the prior ST and this load that's
-     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
-     // then we'll acquire the lock and then re-fetch a fresh TState value.
-     // That is, we fail toward safety.
+    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+    // from the WaitSet to the EntryList.
+    // See if we need to remove Node from the WaitSet.
+    // We use double-checked locking to avoid grabbing _WaitSetLock
+    // if the thread is not on the wait queue.
+    //
+    // Note that we don't need a fence before the fetch of TState.
+    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
+    // written by this thread. (Perhaps the fetch might even be satisfied
+    // by a look-aside into the processor's own store buffer, although given
+    // the length of the code path between the prior ST and this load that's
+    // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+    // then we'll acquire the lock and then re-fetch a fresh TState value.
+    // That is, we fail toward safety.
 
-     if (node.TState == ObjectWaiter::TS_WAIT) {
-         Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
-         if (node.TState == ObjectWaiter::TS_WAIT) {
-            DequeueSpecificWaiter(&node);       // unlink from WaitSet
-            assert(node._notified == 0, "invariant");
-            node.TState = ObjectWaiter::TS_RUN;
-         }
-         Thread::SpinRelease(&_WaitSetLock);
-     }
+    if (node.TState == ObjectWaiter::TS_WAIT) {
+      Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
+      if (node.TState == ObjectWaiter::TS_WAIT) {
+        DequeueSpecificWaiter(&node);       // unlink from WaitSet
+        assert(node._notified == 0, "invariant");
+        node.TState = ObjectWaiter::TS_RUN;
+      }
+      Thread::SpinRelease(&_WaitSetLock);
+    }
 
-     // The thread is now either on off-list (TS_RUN),
-     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
-     // The Node's TState variable is stable from the perspective of this thread.
-     // No other threads will asynchronously modify TState.
-     guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
-     OrderAccess::loadload();
-     if (_succ == Self) _succ = NULL;
-     WasNotified = node._notified;
+    // The thread is now either off-list (TS_RUN),
+    // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+    // The Node's TState variable is stable from the perspective of this thread.
+    // No other threads will asynchronously modify TState.
+    guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
+    OrderAccess::loadload();
+    if (_succ == Self) _succ = NULL;
+    WasNotified = node._notified;
 
-     // Reentry phase -- reacquire the monitor.
-     // re-enter contended monitor after object.wait().
-     // retain OBJECT_WAIT state until re-enter successfully completes
-     // Thread state is thread_in_vm and oop access is again safe,
-     // although the raw address of the object may have changed.
-     // (Don't cache naked oops over safepoints, of course).
+    // Reentry phase -- reacquire the monitor.
+    // re-enter contended monitor after object.wait().
+    // retain OBJECT_WAIT state until re-enter successfully completes
+    // Thread state is thread_in_vm and oop access is again safe,
+    // although the raw address of the object may have changed.
+    // (Don't cache naked oops over safepoints, of course).
 
-     // post monitor waited event. Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+    // post monitor waited event. Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 
-       if (node._notified != 0 && _succ == Self) {
-         // In this part of the monitor wait-notify-reenter protocol it
-         // is possible (and normal) for another thread to do a fastpath
-         // monitor enter-exit while this thread is still trying to get
-         // to the reenter portion of the protocol.
-         //
-         // The ObjectMonitor was notified and the current thread is
-         // the successor which also means that an unpark() has already
-         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
-         // consume the unpark() that was done when the successor was
-         // set because the same ParkEvent is shared between Java
-         // monitors and JVM/TI RawMonitors (for now).
-         //
-         // We redo the unpark() to ensure forward progress, i.e., we
-         // don't want all pending threads hanging (parked) with none
-         // entering the unlocked monitor.
-         node._event->unpark();
-       }
-     }
+      if (node._notified != 0 && _succ == Self) {
+        // In this part of the monitor wait-notify-reenter protocol it
+        // is possible (and normal) for another thread to do a fastpath
+        // monitor enter-exit while this thread is still trying to get
+        // to the reenter portion of the protocol.
+        //
+        // The ObjectMonitor was notified and the current thread is
+        // the successor which also means that an unpark() has already
+        // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+        // consume the unpark() that was done when the successor was
+        // set because the same ParkEvent is shared between Java
+        // monitors and JVM/TI RawMonitors (for now).
+        //
+        // We redo the unpark() to ensure forward progress, i.e., we
+        // don't want all pending threads hanging (parked) with none
+        // entering the unlocked monitor.
+        node._event->unpark();
+      }
+    }
 
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
-     }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+    }
 
-     OrderAccess::fence();
+    OrderAccess::fence();
 
-     assert(Self->_Stalled != 0, "invariant");
-     Self->_Stalled = 0;
+    assert(Self->_Stalled != 0, "invariant");
+    Self->_Stalled = 0;
 
-     assert(_owner != Self, "invariant");
-     ObjectWaiter::TStates v = node.TState;
-     if (v == ObjectWaiter::TS_RUN) {
-         enter(Self);
-     } else {
-         guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-         ReenterI(Self, &node);
-         node.wait_reenter_end(this);
-     }
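+    // If we unlinked ourselves (timeout or interrupt) we are TS_RUN and must
+    // contend for the monitor from scratch via enter().  If a notifier moved
+    // us to the EntryList or cxq (TS_ENTER or TS_CXQ) we resume via ReenterI().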
+    assert(_owner != Self, "invariant");
+    ObjectWaiter::TStates v = node.TState;
+    if (v == ObjectWaiter::TS_RUN) {
+      enter(Self);
+    } else {
+      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+      ReenterI(Self, &node);
+      node.wait_reenter_end(this);
+    }
 
-     // Self has reacquired the lock.
-     // Lifecycle - the node representing Self must not appear on any queues.
-     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
-     // want residual elements associated with this thread left on any lists.
-     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
-     assert(_owner == Self, "invariant");
-     assert(_succ != Self , "invariant");
-   } // OSThreadWaitState()
+    // Self has reacquired the lock.
+    // Lifecycle - the node representing Self must not appear on any queues.
+    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+    // want residual elements associated with this thread left on any lists.
+    guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
+    assert(_owner == Self, "invariant");
+    assert(_succ != Self, "invariant");
+  } // OSThreadWaitState()
 
-   jt->set_current_waiting_monitor(NULL);
+  jt->set_current_waiting_monitor(NULL);
 
-   guarantee(_recursions == 0, "invariant");
-   _recursions = save;     // restore the old recursion count
-   _waiters--;             // decrement the number of waiters
+  guarantee(_recursions == 0, "invariant");
+  _recursions = save;     // restore the old recursion count
+  _waiters--;             // decrement the number of waiters
 
-   // Verify a few postconditions
-   assert(_owner == Self       , "invariant");
-   assert(_succ  != Self       , "invariant");
-   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  // Verify a few postconditions
+  assert(_owner == Self, "invariant");
+  assert(_succ != Self, "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 
-   if (SyncFlags & 32) {
-      OrderAccess::fence();
-   }
+  if (SyncFlags & 32) {
+    OrderAccess::fence();
+  }
 
-   // check if the notification happened
-   if (!WasNotified) {
-     // no, it could be timeout or Thread.interrupt() or both
-     // check for interrupt event, otherwise it is timeout
-     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-       TEVENT(Wait - throw IEX from epilog);
-       THROW(vmSymbols::java_lang_InterruptedException());
-     }
-   }
+  // check if the notification happened
+  if (!WasNotified) {
+    // no, it could be timeout or Thread.interrupt() or both
+    // check for interrupt event, otherwise it is timeout
+    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+      TEVENT(Wait - throw IEX from epilog);
+      THROW(vmSymbols::java_lang_InterruptedException());
+    }
+  }
 
-   // NOTE: Spurious wake up will be consider as timeout.
-   // Monitor notify has precedence over thread interrupt.
+  // NOTE: A spurious wakeup will be considered a timeout.
+  // Monitor notify has precedence over thread interrupt.
 }
 
 
@@ -1666,8 +1662,8 @@
 void ObjectMonitor::notify(TRAPS) {
   CHECK_OWNER();
   if (_WaitSet == NULL) {
-     TEVENT(Empty-Notify);
-     return;
+    TEVENT(Empty-Notify);
+    return;
   }
   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
 
@@ -1676,108 +1672,105 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
   ObjectWaiter * iterator = DequeueWaiter();
   if (iterator != NULL) {
-     TEVENT(Notify1 - Transfer);
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
+    TEVENT(Notify1 - Transfer);
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
 
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
-        }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
-        }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            iterator->TState = ObjectWaiter::TS_CXQ;
-            for (;;) {
-                ObjectWaiter * Front = _cxq;
-                iterator->_next = Front;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                    break;
-                }
-            }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
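+    // Policy (Knob_MoveNotifyee) selects the queue discipline for the
+    // notified thread:
+    //   0 - prepend to the EntryList    1 - append to the EntryList
+    //   2 - prepend to the cxq          3 - append to the cxq
+    //   anything else - mark it TS_RUN and unpark it immediately.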
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else if (Policy == 2) {      // prepend to cxq
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
         iterator->TState = ObjectWaiter::TS_CXQ;
         for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
+          ObjectWaiter * Front = _cxq;
+          iterator->_next = Front;
+          if (Atomic::cmpxchg_ptr(iterator, &_cxq, Front) == Front) {
+            break;
+          }
         }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
+        }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of wait() timeouts and interrupts, the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc();
+    ObjectMonitor::_sync_Notifications->inc();
   }
 }
 
@@ -1786,8 +1779,8 @@
   CHECK_OWNER();
   ObjectWaiter* iterator;
   if (_WaitSet == NULL) {
-      TEVENT(Empty-NotifyAll);
-      return;
+    TEVENT(Empty-NotifyAll);
+    return;
   }
   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
 
@@ -1796,112 +1789,109 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notifyall");
 
   for (;;) {
-     iterator = DequeueWaiter();
-     if (iterator == NULL) break;
-     TEVENT(NotifyAll - Transfer1);
-     ++Tally;
+    iterator = DequeueWaiter();
+    if (iterator == NULL) break;
+    TEVENT(NotifyAll - Transfer1);
+    ++Tally;
+
+    // Disposition - what might we do with iterator ?
+    // a.  add it directly to the EntryList - either tail or head.
+    // b.  push it onto the front of the _cxq.
+    // For now we use (a).
 
-     // Disposition - what might we do with iterator ?
-     // a.  add it directly to the EntryList - either tail or head.
-     // b.  push it onto the front of the _cxq.
-     // For now we use (a).
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
-
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next) /* empty */;
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else if (Policy == 2) {      // prepend to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Front = _cxq;
+        iterator->_next = Front;
+        if (Atomic::cmpxchg_ptr(iterator, &_cxq, Front) == Front) {
+          break;
         }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
+      }
+    } else if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
         }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         iterator->TState = ObjectWaiter::TS_CXQ;
-         for (;;) {
-             ObjectWaiter * Front = _cxq;
-             iterator->_next = Front;
-             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                 break;
-             }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
-        iterator->TState = ObjectWaiter::TS_CXQ;
-        for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
-        }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of wait() timeouts and interrupts, the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc(Tally);
+    ObjectMonitor::_sync_Notifications->inc(Tally);
   }
 }
 
@@ -1969,7 +1959,6 @@
 // situation is not dire.  The state is benign -- there's no need to add
 // hysteresis control to damp the transition rate between spinning and
 // not spinning.
-//
 
 intptr_t ObjectMonitor::SpinCallbackArgument = 0;
 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL;
@@ -1977,229 +1966,228 @@
 // Spinning: Fixed frequency (100%), vary duration
 
 
-int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
+int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
+  // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
+  int ctr = Knob_FixedSpin;
+  if (ctr != 0) {
+    while (--ctr >= 0) {
+      if (TryLock(Self) > 0) return 1;
+      SpinPause();
+    }
+    return 0;
+  }
+
+  for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
+    if (TryLock(Self) > 0) {
+      // Increase _SpinDuration ...
+      // Note that we don't clamp SpinDuration precisely at SpinLimit.
+      // Raising _SpinDuration to the poverty line is key.
+      int x = _SpinDuration;
+      if (x < Knob_SpinLimit) {
+        if (x < Knob_Poverty) x = Knob_Poverty;
+        _SpinDuration = x + Knob_BonusB;
+      }
+      return 1;
+    }
+    SpinPause();
+  }
+
+  // Admission control - verify preconditions for spinning
+  //
+  // We always spin a little bit, just to prevent _SpinDuration == 0 from
+  // becoming an absorbing state.  Put another way, we spin briefly to
+  // sample, just in case the system load, parallelism, contention, or lock
+  // modality changed.
+  //
+  // Consider the following alternative:
+  // Periodically set _SpinDuration = _SpinLimit and try a long/full
+  // spin attempt.  "Periodically" might mean after a tally of
+  // the # of failed spin attempts (or iterations) reaches some threshold.
+  // This takes us into the realm of 1-out-of-N spinning, where we
+  // hold the duration constant but vary the frequency.
+
+  ctr = _SpinDuration;
+  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
+  if (ctr <= 0) return 0;
+
+  if (Knob_SuccRestrict && _succ != NULL) return 0;
+  if (Knob_OState && NotRunnable(Self, (Thread *) _owner)) {
+    TEVENT(Spin abort - notrunnable [TOP]);
+    return 0;
+  }
 
-    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
-    int ctr = Knob_FixedSpin;
-    if (ctr != 0) {
-        while (--ctr >= 0) {
-            if (TryLock(Self) > 0) return 1;
-            SpinPause();
-        }
-        return 0;
+  int MaxSpin = Knob_MaxSpinners;
+  if (MaxSpin >= 0) {
+    if (_Spinner > MaxSpin) {
+      TEVENT(Spin abort -- too many spinners);
+      return 0;
+    }
+    // Slightly racy, but benign ...
+    Adjust(&_Spinner, 1);
+  }
+
+  // We're good to spin ... spin ingress.
+  // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+  // when preparing to LD...CAS _owner, etc and the CAS is likely
+  // to succeed.
+  int hits    = 0;
+  int msk     = 0;
+  int caspty  = Knob_CASPenalty;
+  int oxpty   = Knob_OXPenalty;
+  int sss     = Knob_SpinSetSucc;
+  if (sss && _succ == NULL) _succ = Self;
+  Thread * prv = NULL;
+
+  // There are three ways to exit the following loop:
+  // 1.  A successful spin where this thread has acquired the lock.
+  // 2.  Spin failure with prejudice
+  // 3.  Spin failure without prejudice
+
+  while (--ctr >= 0) {
+
+    // Periodic polling -- Check for pending GC
+    // Threads may spin while they're unsafe.
+    // We don't want spinning threads to delay the JVM from reaching
+    // a stop-the-world safepoint or to steal cycles from GC.
+    // If we detect a pending safepoint we abort in order that
+    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+    // this thread, if safe, doesn't steal cycles from GC.
+    // This is in keeping with the "no loitering in runtime" rule.
+    // We periodically check to see if there's a safepoint pending.
+    if ((ctr & 0xFF) == 0) {
+      if (SafepointSynchronize::do_call_back()) {
+        TEVENT(Spin: safepoint);
+        goto Abort;           // abrupt spin egress
+      }
+      if (Knob_UsePause & 1) SpinPause();
+
+      int (*scb)(intptr_t, int) = SpinCallbackFunction;
+      if (hits > 50 && scb != NULL) {
+        int abend = (*scb)(SpinCallbackArgument, 0);
+      }
     }
 
-    for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
-      if (TryLock(Self) > 0) {
-        // Increase _SpinDuration ...
+    if (Knob_UsePause & 2) SpinPause();
+
+    // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
+    // This is useful on classic SMP systems, but is of less utility on
+    // N1-style CMT platforms.
+    //
+    // Trade-off: lock acquisition latency vs coherency bandwidth.
+    // Lock hold times are typically short.  A histogram
+    // of successful spin attempts shows that we usually acquire
+    // the lock early in the spin.  That suggests we want to
+    // sample _owner frequently in the early phase of the spin,
+    // but then back-off and sample less frequently as the spin
+    // progresses.  The back-off makes us a good citizen on big
+    // SMP systems.  Oversampling _owner can consume excessive
+    // coherency bandwidth.  Relatedly, if we oversample _owner we
+    // can inadvertently interfere with the ST m->owner=NULL
+    // executed by the lock owner.
+    if (ctr & msk) continue;
+    ++hits;
+    if ((hits & 0xF) == 0) {
+      // The 0xF, above, corresponds to the exponent.
+      // Consider: (msk+1)|msk
+      msk = ((msk << 2)|3) & BackOffMask;
+    }
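+    // Roughly: msk grows 0x0 -> 0x3 -> 0xF -> 0x3F -> ... every 16th hit
+    // (capped by BackOffMask), so _owner is sampled on only 1 out of every
+    // (msk + 1) iterations of the spin loop.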
+
+    // Probe _owner with TATAS
+    // If this thread observes the monitor transition or flicker
+    // from locked to unlocked to locked, then the odds that this
+    // thread will acquire the lock in this spin attempt go down
+    // considerably.  The same argument applies if the CAS fails
+    // or if we observe _owner change from one non-null value to
+    // another non-null value.   In such cases we might abort
+    // the spin without prejudice or apply a "penalty" to the
+    // spin count-down variable "ctr", reducing it by 100, say.
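+    // (TATAS = test-and-test-and-set: do a plain load of _owner first and
+    // attempt the expensive CAS only when the lock appears to be free.)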
+
+    Thread * ox = (Thread *) _owner;
+    if (ox == NULL) {
+      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+      if (ox == NULL) {
+        // The CAS succeeded -- this thread acquired ownership
+        // Take care of some bookkeeping to exit spin state.
+        if (sss && _succ == Self) {
+          _succ = NULL;
+        }
+        if (MaxSpin > 0) Adjust(&_Spinner, -1);
+
+        // Increase _SpinDuration :
+        // The spin was successful (profitable) so we tend toward
+        // longer spin attempts in the future.
+        // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+        // If we acquired the lock early in the spin cycle it
+        // makes sense to increase _SpinDuration proportionally.
         // Note that we don't clamp SpinDuration precisely at SpinLimit.
-        // Raising _SpurDuration to the poverty line is key.
         int x = _SpinDuration;
         if (x < Knob_SpinLimit) {
-           if (x < Knob_Poverty) x = Knob_Poverty;
-           _SpinDuration = x + Knob_BonusB;
+          if (x < Knob_Poverty) x = Knob_Poverty;
+          _SpinDuration = x + Knob_Bonus;
         }
         return 1;
       }
-      SpinPause();
-    }
 
-    // Admission control - verify preconditions for spinning
-    //
-    // We always spin a little bit, just to prevent _SpinDuration == 0 from
-    // becoming an absorbing state.  Put another way, we spin briefly to
-    // sample, just in case the system load, parallelism, contention, or lock
-    // modality changed.
-    //
-    // Consider the following alternative:
-    // Periodically set _SpinDuration = _SpinLimit and try a long/full
-    // spin attempt.  "Periodically" might mean after a tally of
-    // the # of failed spin attempts (or iterations) reaches some threshold.
-    // This takes us into the realm of 1-out-of-N spinning, where we
-    // hold the duration constant but vary the frequency.
-
-    ctr = _SpinDuration;
-    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
-    if (ctr <= 0) return 0;
-
-    if (Knob_SuccRestrict && _succ != NULL) return 0;
-    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
-       TEVENT(Spin abort - notrunnable [TOP]);
-       return 0;
-    }
-
-    int MaxSpin = Knob_MaxSpinners;
-    if (MaxSpin >= 0) {
-       if (_Spinner > MaxSpin) {
-          TEVENT(Spin abort -- too many spinners);
-          return 0;
-       }
-       // Slightly racy, but benign ...
-       Adjust(&_Spinner, 1);
+      // The CAS failed ... we can take any of the following actions:
+      // * penalize: ctr -= Knob_CASPenalty
+      // * exit spin with prejudice -- goto Abort;
+      // * exit spin without prejudice.
+      // * Since CAS is high-latency, retry again immediately.
+      prv = ox;
+      TEVENT(Spin: cas failed);
+      if (caspty == -2) break;
+      if (caspty == -1) goto Abort;
+      ctr -= caspty;
+      continue;
     }
 
-    // We're good to spin ... spin ingress.
-    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
-    // when preparing to LD...CAS _owner, etc and the CAS is likely
-    // to succeed.
-    int hits    = 0;
-    int msk     = 0;
-    int caspty  = Knob_CASPenalty;
-    int oxpty   = Knob_OXPenalty;
-    int sss     = Knob_SpinSetSucc;
-    if (sss && _succ == NULL) _succ = Self;
-    Thread * prv = NULL;
-
-    // There are three ways to exit the following loop:
-    // 1.  A successful spin where this thread has acquired the lock.
-    // 2.  Spin failure with prejudice
-    // 3.  Spin failure without prejudice
-
-    while (--ctr >= 0) {
-
-      // Periodic polling -- Check for pending GC
-      // Threads may spin while they're unsafe.
-      // We don't want spinning threads to delay the JVM from reaching
-      // a stop-the-world safepoint or to steal cycles from GC.
-      // If we detect a pending safepoint we abort in order that
-      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
-      // this thread, if safe, doesn't steal cycles from GC.
-      // This is in keeping with the "no loitering in runtime" rule.
-      // We periodically check to see if there's a safepoint pending.
-      if ((ctr & 0xFF) == 0) {
-         if (SafepointSynchronize::do_call_back()) {
-            TEVENT(Spin: safepoint);
-            goto Abort;           // abrupt spin egress
-         }
-         if (Knob_UsePause & 1) SpinPause();
-
-         int (*scb)(intptr_t,int) = SpinCallbackFunction;
-         if (hits > 50 && scb != NULL) {
-            int abend = (*scb)(SpinCallbackArgument, 0);
-         }
-      }
-
-      if (Knob_UsePause & 2) SpinPause();
-
-      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
-      // This is useful on classic SMP systems, but is of less utility on
-      // N1-style CMT platforms.
-      //
-      // Trade-off: lock acquisition latency vs coherency bandwidth.
-      // Lock hold times are typically short.  A histogram
-      // of successful spin attempts shows that we usually acquire
-      // the lock early in the spin.  That suggests we want to
-      // sample _owner frequently in the early phase of the spin,
-      // but then back-off and sample less frequently as the spin
-      // progresses.  The back-off makes a good citizen on SMP big
-      // SMP systems.  Oversampling _owner can consume excessive
-      // coherency bandwidth.  Relatedly, if we _oversample _owner we
-      // can inadvertently interfere with the the ST m->owner=null.
-      // executed by the lock owner.
-      if (ctr & msk) continue;
-      ++hits;
-      if ((hits & 0xF) == 0) {
-        // The 0xF, above, corresponds to the exponent.
-        // Consider: (msk+1)|msk
-        msk = ((msk << 2)|3) & BackOffMask;
-      }
+    // Did lock ownership change hands ?
+    if (ox != prv && prv != NULL) {
+      TEVENT(spin: Owner changed);
+      if (oxpty == -2) break;
+      if (oxpty == -1) goto Abort;
+      ctr -= oxpty;
+    }
+    prv = ox;
 
-      // Probe _owner with TATAS
-      // If this thread observes the monitor transition or flicker
-      // from locked to unlocked to locked, then the odds that this
-      // thread will acquire the lock in this spin attempt go down
-      // considerably.  The same argument applies if the CAS fails
-      // or if we observe _owner change from one non-null value to
-      // another non-null value.   In such cases we might abort
-      // the spin without prejudice or apply a "penalty" to the
-      // spin count-down variable "ctr", reducing it by 100, say.
-
-      Thread * ox = (Thread *) _owner;
-      if (ox == NULL) {
-         ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
-         if (ox == NULL) {
-            // The CAS succeeded -- this thread acquired ownership
-            // Take care of some bookkeeping to exit spin state.
-            if (sss && _succ == Self) {
-               _succ = NULL;
-            }
-            if (MaxSpin > 0) Adjust(&_Spinner, -1);
-
-            // Increase _SpinDuration :
-            // The spin was successful (profitable) so we tend toward
-            // longer spin attempts in the future.
-            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
-            // If we acquired the lock early in the spin cycle it
-            // makes sense to increase _SpinDuration proportionally.
-            // Note that we don't clamp SpinDuration precisely at SpinLimit.
-            int x = _SpinDuration;
-            if (x < Knob_SpinLimit) {
-                if (x < Knob_Poverty) x = Knob_Poverty;
-                _SpinDuration = x + Knob_Bonus;
-            }
-            return 1;
-         }
+    // Abort the spin if the owner is not executing.
+    // The owner must be executing in order to drop the lock.
+    // Spinning while the owner is OFFPROC is idiocy.
+    // Consider: ctr -= RunnablePenalty ;
+    if (Knob_OState && NotRunnable(Self, ox)) {
+      TEVENT(Spin abort - notrunnable);
+      goto Abort;
+    }
+    if (sss && _succ == NULL) _succ = Self;
+  }
 
-         // The CAS failed ... we can take any of the following actions:
-         // * penalize: ctr -= Knob_CASPenalty
-         // * exit spin with prejudice -- goto Abort;
-         // * exit spin without prejudice.
-         // * Since CAS is high-latency, retry again immediately.
-         prv = ox;
-         TEVENT(Spin: cas failed);
-         if (caspty == -2) break;
-         if (caspty == -1) goto Abort;
-         ctr -= caspty;
-         continue;
-      }
-
-      // Did lock ownership change hands ?
-      if (ox != prv && prv != NULL) {
-          TEVENT(spin: Owner changed)
-          if (oxpty == -2) break;
-          if (oxpty == -1) goto Abort;
-          ctr -= oxpty;
-      }
-      prv = ox;
-
-      // Abort the spin if the owner is not executing.
-      // The owner must be executing in order to drop the lock.
-      // Spinning while the owner is OFFPROC is idiocy.
-      // Consider: ctr -= RunnablePenalty ;
-      if (Knob_OState && NotRunnable (Self, ox)) {
-         TEVENT(Spin abort - notrunnable);
-         goto Abort;
-      }
-      if (sss && _succ == NULL) _succ = Self;
-   }
-
-   // Spin failed with prejudice -- reduce _SpinDuration.
-   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
-   // AIMD is globally stable.
-   TEVENT(Spin failure);
-   {
-     int x = _SpinDuration;
-     if (x > 0) {
-        // Consider an AIMD scheme like: x -= (x >> 3) + 100
-        // This is globally sample and tends to damp the response.
-        x -= Knob_Penalty;
-        if (x < 0) x = 0;
-        _SpinDuration = x;
-     }
-   }
+  // Spin failed with prejudice -- reduce _SpinDuration.
+  // TODO: Use an AIMD-like policy to adjust _SpinDuration.
+  // AIMD is globally stable.
+  TEVENT(Spin failure);
+  {
+    int x = _SpinDuration;
+    if (x > 0) {
+      // Consider an AIMD scheme like: x -= (x >> 3) + 100
+      // This is globally stable and tends to damp the response.
+      x -= Knob_Penalty;
+      if (x < 0) x = 0;
+      _SpinDuration = x;
+    }
+  }
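+  // (AIMD = additive increase / multiplicative decrease.  Today we increase
+  // additively via Knob_Bonus and decrease additively via Knob_Penalty; the
+  // x -= (x >> 3) + 100 scheme above would make the decrease multiplicative.)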
 
  Abort:
-   if (MaxSpin >= 0) Adjust(&_Spinner, -1);
-   if (sss && _succ == Self) {
-      _succ = NULL;
-      // Invariant: after setting succ=null a contending thread
-      // must recheck-retry _owner before parking.  This usually happens
-      // in the normal usage of TrySpin(), but it's safest
-      // to make TrySpin() as foolproof as possible.
-      OrderAccess::fence();
-      if (TryLock(Self) > 0) return 1;
-   }
-   return 0;
+  if (MaxSpin >= 0) Adjust(&_Spinner, -1);
+  if (sss && _succ == Self) {
+    _succ = NULL;
+    // Invariant: after setting succ=null a contending thread
+    // must recheck-retry _owner before parking.  This usually happens
+    // in the normal usage of TrySpin(), but it's safest
+    // to make TrySpin() as foolproof as possible.
+    OrderAccess::fence();
+    if (TryLock(Self) > 0) return 1;
+  }
+  return 0;
 }
 
 // NotRunnable() -- informed spinning
@@ -2241,30 +2229,30 @@
 // Spinning, in general, is probabilistic anyway.
 
 
-int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
-    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
-    if (!OwnerIsThread) return 0;
+int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
+  // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+  if (!OwnerIsThread) return 0;
 
-    if (ox == NULL) return 0;
+  if (ox == NULL) return 0;
 
-    // Avoid transitive spinning ...
-    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
-    // Immediately after T1 acquires L it's possible that T2, also
-    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
-    // This occurs transiently after T1 acquired L but before
-    // T1 managed to clear T1.Stalled.  T2 does not need to abort
-    // its spin in this circumstance.
-    intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
+  // Avoid transitive spinning ...
+  // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
+  // Immediately after T1 acquires L it's possible that T2, also
+  // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+  // This occurs transiently after T1 acquired L but before
+  // T1 managed to clear T1.Stalled.  T2 does not need to abort
+  // its spin in this circumstance.
+  intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
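+  // SafeFetchN tolerates a stale ox: if the load faults it returns the
+  // supplied error value (1), which we conservatively treat as "not runnable".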
 
-    if (BlockedOn == 1) return 1;
-    if (BlockedOn != 0) {
-      return BlockedOn != intptr_t(this) && _owner == ox;
-    }
+  if (BlockedOn == 1) return 1;
+  if (BlockedOn != 0) {
+    return BlockedOn != intptr_t(this) && _owner == ox;
+  }
 
-    assert(sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant");
-    int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);;
-    // consider also: jst != _thread_in_Java -- but that's overspecific.
-    return jst == _thread_blocked || jst == _thread_in_native;
+  assert(sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant");
+  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);
+  // consider also: jst != _thread_in_Java -- but that's overspecific.
+  return jst == _thread_blocked || jst == _thread_in_native;
 }
 
 
@@ -2377,28 +2365,37 @@
   assert(InitializationCompleted == 0, "invariant");
   InitializationCompleted = 1;
   if (UsePerfData) {
-      EXCEPTION_MARK;
-      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      NEWPERFCOUNTER(_sync_Inflations);
-      NEWPERFCOUNTER(_sync_Deflations);
-      NEWPERFCOUNTER(_sync_ContendedLockAttempts);
-      NEWPERFCOUNTER(_sync_FutileWakeups);
-      NEWPERFCOUNTER(_sync_Parks);
-      NEWPERFCOUNTER(_sync_EmptyNotifications);
-      NEWPERFCOUNTER(_sync_Notifications);
-      NEWPERFCOUNTER(_sync_SlowEnter);
-      NEWPERFCOUNTER(_sync_SlowExit);
-      NEWPERFCOUNTER(_sync_SlowNotify);
-      NEWPERFCOUNTER(_sync_SlowNotifyAll);
-      NEWPERFCOUNTER(_sync_FailedSpins);
-      NEWPERFCOUNTER(_sync_SuccessfulSpins);
-      NEWPERFCOUNTER(_sync_PrivateA);
-      NEWPERFCOUNTER(_sync_PrivateB);
-      NEWPERFCOUNTER(_sync_MonInCirculation);
-      NEWPERFCOUNTER(_sync_MonScavenged);
-      NEWPERFVARIABLE(_sync_MonExtant);
-      #undef NEWPERFCOUNTER
+    EXCEPTION_MARK;
+#define NEWPERFCOUNTER(n)                                                \
+  {                                                                      \
+    n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,  \
+                                        CHECK);                          \
+  }
+#define NEWPERFVARIABLE(n)                                                \
+  {                                                                       \
+    n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
+                                         CHECK);                          \
+  }
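+    // Via #n stringification, e.g. NEWPERFCOUNTER(_sync_Inflations) registers
+    // a counter named "_sync_Inflations" in the SUN_RT namespace.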
+    NEWPERFCOUNTER(_sync_Inflations);
+    NEWPERFCOUNTER(_sync_Deflations);
+    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
+    NEWPERFCOUNTER(_sync_FutileWakeups);
+    NEWPERFCOUNTER(_sync_Parks);
+    NEWPERFCOUNTER(_sync_EmptyNotifications);
+    NEWPERFCOUNTER(_sync_Notifications);
+    NEWPERFCOUNTER(_sync_SlowEnter);
+    NEWPERFCOUNTER(_sync_SlowExit);
+    NEWPERFCOUNTER(_sync_SlowNotify);
+    NEWPERFCOUNTER(_sync_SlowNotifyAll);
+    NEWPERFCOUNTER(_sync_FailedSpins);
+    NEWPERFCOUNTER(_sync_SuccessfulSpins);
+    NEWPERFCOUNTER(_sync_PrivateA);
+    NEWPERFCOUNTER(_sync_PrivateB);
+    NEWPERFCOUNTER(_sync_MonInCirculation);
+    NEWPERFCOUNTER(_sync_MonScavenged);
+    NEWPERFVARIABLE(_sync_MonExtant);
+#undef NEWPERFCOUNTER
+#undef NEWPERFVARIABLE
   }
 }
 
@@ -2416,34 +2413,34 @@
 }
 
 
-static char * kvGet (char * kvList, const char * Key) {
-    if (kvList == NULL) return NULL;
-    size_t n = strlen(Key);
-    char * Search;
-    for (Search = kvList; *Search; Search += strlen(Search) + 1) {
-        if (strncmp (Search, Key, n) == 0) {
-            if (Search[n] == '=') return Search + n + 1;
-            if (Search[n] == 0)   return(char *) "1";
-        }
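+// kvList is a NUL-separated, double-NUL-terminated sequence of "Key=Value"
+// (or bare "Key") entries.  DeferredInitialize() builds it from the
+// colon-separated SyncKnobs string, e.g. "SpinLimit=5000:Verbose=1".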
+static char * kvGet(char * kvList, const char * Key) {
+  if (kvList == NULL) return NULL;
+  size_t n = strlen(Key);
+  char * Search;
+  for (Search = kvList; *Search; Search += strlen(Search) + 1) {
+    if (strncmp(Search, Key, n) == 0) {
+      if (Search[n] == '=') return Search + n + 1;
+      if (Search[n] == 0)   return (char *) "1";
     }
-    return NULL;
+  }
+  return NULL;
 }
 
-static int kvGetInt (char * kvList, const char * Key, int Default) {
-    char * v = kvGet(kvList, Key);
-    int rslt = v ? ::strtol(v, NULL, 0) : Default;
-    if (Knob_ReportSettings && v != NULL) {
-        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
-        ::fflush(stdout);
-    }
-    return rslt;
+static int kvGetInt(char * kvList, const char * Key, int Default) {
+  char * v = kvGet(kvList, Key);
+  int rslt = v ? ::strtol(v, NULL, 0) : Default;
+  if (Knob_ReportSettings && v != NULL) {
+    ::printf("  SyncKnob: %s %d(%d)\n", Key, rslt, Default);
+    ::fflush(stdout);
+  }
+  return rslt;
 }
 
 void ObjectMonitor::DeferredInitialize() {
   if (InitDone > 0) return;
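+  // One-shot initialization: the first caller CASes InitDone from 0 to -1
+  // and runs the setup below; latecomers spin until the winner publishes
+  // InitDone = 1.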
   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
-      while (InitDone != 1);
-      return;
+    while (InitDone != 1) /* empty */;
+    return;
   }
 
   // One-shot global initialization ...
@@ -2457,16 +2454,16 @@
   size_t sz = strlen(SyncKnobs);
   char * knobs = (char *) malloc(sz + 2);
   if (knobs == NULL) {
-     vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
-     guarantee(0, "invariant");
+    vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
+    guarantee(0, "invariant");
   }
   strcpy(knobs, SyncKnobs);
   knobs[sz+1] = 0;
   for (char * p = knobs; *p; p++) {
-     if (*p == ':') *p = 0;
+    if (*p == ':') *p = 0;
   }
 
-  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
+  #define SETKNOB(x) { Knob_##x = kvGetInt(knobs, #x, Knob_##x); }
   SETKNOB(ReportSettings);
   SETKNOB(Verbose);
   SETKNOB(VerifyInUse);
@@ -2502,18 +2499,18 @@
   }
 
   if (os::is_MP()) {
-     BackOffMask = (1 << Knob_SpinBackOff) - 1;
-     if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
-     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+    BackOffMask = (1 << Knob_SpinBackOff) - 1;
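+    // i.e. a low-order bit mask of 2^Knob_SpinBackOff - 1; for example
+    // Knob_SpinBackOff == 6 yields BackOffMask == 0x3F.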
+    if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
+    // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
   } else {
-     Knob_SpinLimit = 0;
-     Knob_SpinBase  = 0;
-     Knob_PreSpin   = 0;
-     Knob_FixedSpin = -1;
+    Knob_SpinLimit = 0;
+    Knob_SpinBase  = 0;
+    Knob_PreSpin   = 0;
+    Knob_FixedSpin = -1;
   }
 
   if (Knob_LogSpins == 0) {
-     ObjectMonitor::_sync_FailedSpins = NULL;
+    ObjectMonitor::_sync_FailedSpins = NULL;
   }
 
   free(knobs);
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -87,18 +87,18 @@
  public:
   // TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
   // ByteSize would also be an appropriate type.
-  static int header_offset_in_bytes()      { return offset_of(ObjectMonitor, _header);     }
-  static int object_offset_in_bytes()      { return offset_of(ObjectMonitor, _object);     }
-  static int owner_offset_in_bytes()       { return offset_of(ObjectMonitor, _owner);      }
-  static int count_offset_in_bytes()       { return offset_of(ObjectMonitor, _count);      }
+  static int header_offset_in_bytes()      { return offset_of(ObjectMonitor, _header); }
+  static int object_offset_in_bytes()      { return offset_of(ObjectMonitor, _object); }
+  static int owner_offset_in_bytes()       { return offset_of(ObjectMonitor, _owner); }
+  static int count_offset_in_bytes()       { return offset_of(ObjectMonitor, _count); }
   static int recursions_offset_in_bytes()  { return offset_of(ObjectMonitor, _recursions); }
-  static int cxq_offset_in_bytes()         { return offset_of(ObjectMonitor, _cxq);       }
-  static int succ_offset_in_bytes()        { return offset_of(ObjectMonitor, _succ);      }
-  static int EntryList_offset_in_bytes()   { return offset_of(ObjectMonitor, _EntryList);  }
-  static int FreeNext_offset_in_bytes()    { return offset_of(ObjectMonitor, FreeNext);    }
-  static int WaitSet_offset_in_bytes()     { return offset_of(ObjectMonitor, _WaitSet);   }
+  static int cxq_offset_in_bytes()         { return offset_of(ObjectMonitor, _cxq); }
+  static int succ_offset_in_bytes()        { return offset_of(ObjectMonitor, _succ); }
+  static int EntryList_offset_in_bytes()   { return offset_of(ObjectMonitor, _EntryList); }
+  static int FreeNext_offset_in_bytes()    { return offset_of(ObjectMonitor, FreeNext); }
+  static int WaitSet_offset_in_bytes()     { return offset_of(ObjectMonitor, _WaitSet); }
   static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
-  static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner);    }
+  static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner); }
 
  public:
   // Eventually we'll make provisions for multiple callbacks, but
@@ -140,7 +140,7 @@
   ObjectMonitor() {
     _header       = NULL;
     _count        = 0;
-    _waiters      = 0,
+    _waiters      = 0;
     _recursions   = 0;
     _object       = NULL;
     _owner        = NULL;
@@ -158,12 +158,12 @@
   }
 
   ~ObjectMonitor() {
-   // TODO: Add asserts ...
-   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
-   // _count == 0 _EntryList  == NULL etc
+    // TODO: Add asserts ...
+    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 _EntryList  == NULL etc
   }
 
-private:
+ private:
   void Recycle() {
     // TODO: add stronger asserts ...
     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
@@ -180,7 +180,7 @@
     OwnerIsThread  = 0;
   }
 
-public:
+ public:
 
   void*     object() const;
   void*     object_addr();
@@ -225,9 +225,9 @@
   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
   bool      ExitSuspendEquivalent(JavaThread * Self);
   void      post_monitor_wait_event(EventJavaMonitorWait * event,
-                                                   jlong notifier_tid,
-                                                   jlong timeout,
-                                                   bool timedout);
+                                    jlong notifier_tid,
+                                    jlong timeout,
+                                    bool timedout);
 
  private:
   friend class ObjectSynchronizer;
@@ -240,7 +240,7 @@
   volatile markOop   _header;       // displaced object header word - mark
   void*     volatile _object;       // backward object pointer - strong root
 
-  double SharingPad[1];           // temp to reduce false sharing
+  double SharingPad[1];             // temp to reduce false sharing
 
   // All the following fields must be machine word aligned
   // The VM assumes write ordering wrt these fields, which can be
@@ -248,25 +248,25 @@
 
  protected:                         // protected for jvmtiRawMonitor
   void *  volatile _owner;          // pointer to owning thread OR BasicLock
-  volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
+  volatile jlong _previous_owner_tid;  // thread id of the previous owner of the monitor
   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
  private:
-  int OwnerIsThread;               // _owner is (Thread *) vs SP/BasicLock
-  ObjectWaiter * volatile _cxq;    // LL of recently-arrived threads blocked on entry.
+  int OwnerIsThread;                // _owner is (Thread *) vs SP/BasicLock
+  ObjectWaiter * volatile _cxq;     // LL of recently-arrived threads blocked on entry.
                                     // The list is actually composed of WaitNodes, acting
                                     // as proxies for Threads.
  protected:
-  ObjectWaiter * volatile _EntryList;     // Threads blocked on entry or reentry.
+  ObjectWaiter * volatile _EntryList;  // Threads blocked on entry or reentry.
  private:
   Thread * volatile _succ;          // Heir presumptive thread - used for futile wakeup throttling
   Thread * volatile _Responsible;
-  int _PromptDrain;                // rqst to drain cxq into EntryList ASAP
+  int _PromptDrain;                 // rqst to drain cxq into EntryList ASAP
 
-  volatile int _Spinner;           // for exit->spinner handoff optimization
-  volatile int _SpinFreq;          // Spin 1-out-of-N attempts: success rate
+  volatile int _Spinner;            // for exit->spinner handoff optimization
+  volatile int _SpinFreq;           // Spin 1-out-of-N attempts: success rate
   volatile int _SpinClock;
   volatile int _SpinDuration;
-  volatile intptr_t _SpinState;    // MCS/CLH list of spinners
+  volatile intptr_t _SpinState;     // MCS/CLH list of spinners
 
   // TODO-FIXME: _count, _waiters and _recursions should be of
   // type int, or int32_t but not intptr_t.  There's no reason
@@ -284,8 +284,8 @@
   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 
  public:
-  int _QMix;                       // Mixed prepend queue discipline
-  ObjectMonitor * FreeNext;        // Free list linkage
+  int _QMix;                        // Mixed prepend queue discipline
+  ObjectMonitor * FreeNext;         // Free list linkage
   intptr_t StatA, StatsB;
 
  public:
@@ -328,9 +328,17 @@
 };
 
 #undef TEVENT
-#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+#define TEVENT(nom) { if (SyncVerbose) FEVENT(nom); }
 
-#define FEVENT(nom) { static volatile int ctr = 0; int v = ++ctr; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+#define FEVENT(nom)                 \
+  {                                 \
+    static volatile int ctr = 0;    \
+    int v = ++ctr;                  \
+    if ((v & (v - 1)) == 0) {       \
+      ::printf(#nom " : %d\n", v);  \
+      ::fflush(stdout);             \
+    }                               \
+  }
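+// FEVENT fires only when the running count v is a power of two
+// ((v & (v - 1)) == 0), i.e. at counts 1, 2, 4, 8, ... so log volume
+// grows logarithmically with the event rate.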
 
 #undef  TEVENT
 #define TEVENT(nom) {;}
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -50,8 +50,8 @@
 
  private:
   static methodHandle resolve_sub_helper(JavaThread *thread,
-                                     bool is_virtual,
-                                     bool is_optimized, TRAPS);
+                                         bool is_virtual,
+                                         bool is_optimized, TRAPS);
 
   // Shared stub locations
 
@@ -271,35 +271,33 @@
   // used by native wrappers to reenable yellow if overflow happened in native code
   static void reguard_yellow_pages();
 
-  /**
-   * Fill in the "X cannot be cast to a Y" message for ClassCastException
-   *
-   * @param thr the current thread
-   * @param name the name of the class of the object attempted to be cast
-   * @return the dynamically allocated exception message (must be freed
-   * by the caller using a resource mark)
-   *
-   * BCP must refer to the current 'checkcast' opcode for the frame
-   * on top of the stack.
-   * The caller (or one of it's callers) must use a ResourceMark
-   * in order to correctly free the result.
-   */
+  // Fill in the "X cannot be cast to a Y" message for ClassCastException
+  //
+  // @param thr the current thread
+  // @param name the name of the class of the object attempted to be cast
+  // @return the dynamically allocated exception message (must be freed
+  // by the caller using a resource mark)
+  //
+  // BCP must refer to the current 'checkcast' opcode for the frame
+  // on top of the stack.
+  // The caller (or one of its callers) must use a ResourceMark
+  // in order to correctly free the result.
+  //
   static char* generate_class_cast_message(JavaThread* thr, const char* name);
 
-  /**
-   * Fill in the "X cannot be cast to a Y" message for ClassCastException
-   *
-   * @param name the name of the class of the object attempted to be cast
-   * @param klass the name of the target klass attempt
-   * @param gripe the specific kind of problem being reported
-   * @return the dynamically allocated exception message (must be freed
-   * by the caller using a resource mark)
-   *
-   * This version does not require access the frame, so it can be called
-   * from interpreted code
-   * The caller (or one of it's callers) must use a ResourceMark
-   * in order to correctly free the result.
-   */
+  // Fill in the "X cannot be cast to a Y" message for ClassCastException
+  //
+  // @param name the name of the class of the object attempted to be cast
+  // @param klass the name of the target klass attempt
+  // @param gripe the specific kind of problem being reported
+  // @return the dynamically allocated exception message (must be freed
+  // by the caller using a resource mark)
+  //
+  // This version does not require access to the frame, so it can be called
+  // from interpreted code.
+  // The caller (or one of its callers) must use a ResourceMark
+  // in order to correctly free the result.
+  //
   static char* generate_class_cast_message(const char* name, const char* klass,
                                            const char* gripe = " cannot be cast to ");
 
@@ -309,11 +307,11 @@
                                      bool is_virtual,
                                      bool is_optimized, TRAPS);
 
-  private:
+ private:
   // deopt blob
   static void generate_deopt_blob(void);
 
-  public:
+ public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
 
   // Resets a call-site in compiled code so it will get resolved again.
@@ -422,17 +420,17 @@
   // pointer to the C heap storage. This pointer is the return value from
   // OSR_migration_begin.
 
-  static intptr_t* OSR_migration_begin( JavaThread *thread);
+  static intptr_t* OSR_migration_begin(JavaThread *thread);
 
   // OSR_migration_end is a trivial routine. It is called after the compiled
   // method has extracted the jvm state from the C heap that OSR_migration_begin
   // created. It's entire job is to simply free this storage.
-  static void      OSR_migration_end  ( intptr_t* buf);
+  static void OSR_migration_end(intptr_t* buf);
 
   // Convert a sig into a calling convention register layout
   // and find interesting things about it.
   static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
-  static VMReg     name_for_receiver();
+  static VMReg name_for_receiver();
 
   // "Top of Stack" slots that may be unused by the calling convention but must
   // otherwise be preserved.
@@ -691,7 +689,7 @@
   static bool contains(CodeBlob* b);
 #ifndef PRODUCT
   static void print_statistics();
-#endif /* PRODUCT */
+#endif // PRODUCT
 
 };
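
The comment blocks above describe a lifetime contract: both generate_class_cast_message() overloads return a resource-allocated string, so a ResourceMark somewhere up the call chain owns the storage. A hedged sketch of a conforming call site; the function and the print are illustrative, not an actual HotSpot caller:

#include "memory/resourceArea.hpp"     // ResourceMark (HotSpot-internal)
#include "runtime/sharedRuntime.hpp"

// Hypothetical caller honoring the contract: the message lives in the
// resource area owned by rm and must not be cached past this scope.
void report_bad_cast(JavaThread* thr, const char* name) {
  ResourceMark rm(thr);                // owns the storage for msg
  char* msg = SharedRuntime::generate_class_cast_message(thr, name);
  tty->print_cr("%s", msg);            // use msg only while rm is live
}                                      // rm's destructor reclaims msg
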
 
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -43,7 +43,7 @@
 #include "utilities/preserveException.hpp"
 
 #if defined(__GNUC__) && !defined(PPC64)
-  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+// Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define NOINLINE __attribute__((noinline))
 #else
   #define NOINLINE
@@ -57,7 +57,6 @@
 // for instance.  If you make changes here, make sure to modify the
 // interpreter, and both C1 and C2 fast-path inline locking code emission.
 //
-//
 // -----------------------------------------------------------------------------
 
 #ifdef DTRACE_ENABLED
@@ -77,10 +76,10 @@
 
 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
   {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
+    if (DTraceMonitorProbes) {                                             \
       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
       HOTSPOT_MONITOR_WAIT(jtid,                                           \
-                           (uintptr_t)(monitor), bytes, len, (millis));  \
+                           (uintptr_t)(monitor), bytes, len, (millis));    \
     }                                                                      \
   }
 
@@ -88,10 +87,10 @@
 
 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
   {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
+    if (DTraceMonitorProbes) {                                             \
       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
       HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
-                       (uintptr_t)(monitor), bytes, len);                  \
+                                    (uintptr_t)(monitor), bytes, len);     \
     }                                                                      \
   }
 
@@ -116,8 +115,8 @@
 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 int ObjectSynchronizer::gOmInUseCount = 0;
 static volatile intptr_t ListLock = 0;      // protects global monitor free-list cache
-static volatile int MonitorFreeCount  = 0;      // # on gFreeList
-static volatile int MonitorPopulation = 0;      // # Extant -- in circulation
+static volatile int MonitorFreeCount  = 0;  // # on gFreeList
+static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
 // -----------------------------------------------------------------------------
@@ -127,8 +126,9 @@
 // if the following function is changed. The implementation is
 // extremely sensitive to race condition. Be careful.
 
-void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
- if (UseBiasedLocking) {
+void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
+                                    bool attempt_rebias, TRAPS) {
+  if (UseBiasedLocking) {
     if (!SafepointSynchronize::is_at_safepoint()) {
       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
@@ -139,9 +139,9 @@
       BiasedLocking::revoke_at_safepoint(obj);
     }
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
- }
+  }
 
- slow_enter(obj, lock, THREAD);
+  slow_enter(obj, lock, THREAD);
 }
 
 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
@@ -150,19 +150,19 @@
   markOop dhw = lock->displaced_header();
   markOop mark;
   if (dhw == NULL) {
-     // Recursive stack-lock.
-     // Diagnostics -- Could be: stack-locked, inflating, inflated.
-     mark = object->mark();
-     assert(!mark->is_neutral(), "invariant");
-     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
-        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
-     }
-     if (mark->has_monitor()) {
-        ObjectMonitor * m = mark->monitor();
-        assert(((oop)(m->object()))->mark() == mark, "invariant");
-        assert(m->is_entered(THREAD), "invariant");
-     }
-     return;
+    // Recursive stack-lock.
+    // Diagnostics -- Could be: stack-locked, inflating, inflated.
+    mark = object->mark();
+    assert(!mark->is_neutral(), "invariant");
+    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
+    }
+    if (mark->has_monitor()) {
+      ObjectMonitor * m = mark->monitor();
+      assert(((oop)(m->object()))->mark() == mark, "invariant");
+      assert(m->is_entered(THREAD), "invariant");
+    }
+    return;
   }
 
   mark = object->mark();
@@ -170,11 +170,11 @@
   // If the object is stack-locked by the current thread, try to
   // swing the displaced header from the box back to the mark.
   if (mark == (markOop) lock) {
-     assert(dhw->is_neutral(), "invariant");
-     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
-        TEVENT(fast_exit: release stacklock);
-        return;
-     }
+    assert(dhw->is_neutral(), "invariant");
+    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
+      TEVENT(fast_exit: release stacklock);
+      return;
+    }
   }
 
   ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
@@ -198,8 +198,8 @@
       return;
     }
     // Fall through to inflate() ...
-  } else
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+  } else if (mark->has_locker() &&
+             THREAD->is_lock_owned((address)mark->locker())) {
     assert(lock != mark->locker(), "must not re-lock the same lock");
     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
     lock->set_displaced_header(NULL);
@@ -261,7 +261,7 @@
 // -----------------------------------------------------------------------------
 // JNI locks on java objects
 // NOTE: must use heavy weight monitor to handle jni monitor enter
-void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
   // the current locking is from JNI instead of Java code
   TEVENT(jni_enter);
   if (UseBiasedLocking) {
@@ -299,7 +299,7 @@
   // If this thread has locked the object, exit the monitor.  Note:  can't use
   // monitor->check(CHECK); must exit even if an exception is pending.
   if (monitor->check(THREAD)) {
-     monitor->exit(true, THREAD);
+    monitor->exit(true, THREAD);
   }
 }
 
@@ -349,7 +349,7 @@
   return dtrace_waited_probe(monitor, obj, THREAD);
 }
 
-void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
+void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
@@ -362,7 +362,7 @@
 }
 
 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
- if (UseBiasedLocking) {
+  if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
@@ -410,23 +410,23 @@
 // performed by the CPU(s) or platform.
 
 struct SharedGlobals {
-    // These are highly shared mostly-read variables.
-    // To avoid false-sharing they need to be the sole occupants of a $ line.
-    double padPrefix[8];
-    volatile int stwRandom;
-    volatile int stwCycle;
+  // These are highly shared mostly-read variables.
+  // To avoid false-sharing they need to be the sole occupants of a $ line.
+  double padPrefix[8];
+  volatile int stwRandom;
+  volatile int stwCycle;
 
-    // Hot RW variables -- Sequester to avoid false-sharing
-    double padSuffix[16];
-    volatile int hcSequence;
-    double padFinal[8];
+  // Hot RW variables -- Sequester to avoid false-sharing
+  double padSuffix[16];
+  volatile int hcSequence;
+  double padFinal[8];
 };
 
 static SharedGlobals GVars;
 static int MonitorScavengeThreshold = 1000000;
 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
 
-static markOop ReadStableMark (oop obj) {
+static markOop ReadStableMark(oop obj) {
   markOop mark = obj->mark();
   if (!mark->is_being_inflated()) {
     return mark;       // normal fast-path return
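
SharedGlobals above separates the mostly-read fields from the hot hcSequence counter with double-array padding so each group is the sole occupant of a cache line. The same defense sketched with C++11 alignas; the 64-byte line size is an assumption, and HotSpot's manual padding predates alignas:

#include <atomic>

// Illustrative stand-in for SharedGlobals: alignas(64) gives each member
// its own (assumed 64-byte) cache line, so writers of hcSequence do not
// invalidate the line holding the mostly-read stwRandom/stwCycle.
struct PaddedGlobals {
  alignas(64) std::atomic<int> stwRandom;    // mostly-read
  alignas(64) std::atomic<int> stwCycle;     // mostly-read
  alignas(64) std::atomic<int> hcSequence;   // hot read-write
};

static PaddedGlobals gvars;

int next_sequence() {
  // touches only hcSequence's line; the mostly-read lines stay unwritten
  return gvars.hcSequence.fetch_add(1, std::memory_order_relaxed) + 1;
}
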
@@ -451,45 +451,45 @@
 
     ++its;
     if (its > 10000 || !os::is_MP()) {
-       if (its & 1) {
-         os::naked_yield();
-         TEVENT(Inflate: INFLATING - yield);
-       } else {
-         // Note that the following code attenuates the livelock problem but is not
-         // a complete remedy.  A more complete solution would require that the inflating
-         // thread hold the associated inflation lock.  The following code simply restricts
-         // the number of spinners to at most one.  We'll have N-2 threads blocked
-         // on the inflationlock, 1 thread holding the inflation lock and using
-         // a yield/park strategy, and 1 thread in the midst of inflation.
-         // A more refined approach would be to change the encoding of INFLATING
-         // to allow encapsulation of a native thread pointer.  Threads waiting for
-         // inflation to complete would use CAS to push themselves onto a singly linked
-         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
-         // and calling park().  When inflation was complete the thread that accomplished inflation
-         // would detach the list and set the markword to inflated with a single CAS and
-         // then for each thread on the list, set the flag and unpark() the thread.
-         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
-         // wakes at most one thread whereas we need to wake the entire list.
-         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
-         int YieldThenBlock = 0;
-         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
-         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
-         Thread::muxAcquire(InflationLocks + ix, "InflationLock");
-         while (obj->mark() == markOopDesc::INFLATING()) {
-           // Beware: NakedYield() is advisory and has almost no effect on some platforms
-           // so we periodically call Self->_ParkEvent->park(1).
-           // We use a mixed spin/yield/block mechanism.
-           if ((YieldThenBlock++) >= 16) {
-              Thread::current()->_ParkEvent->park(1);
-           } else {
-              os::naked_yield();
-           }
-         }
-         Thread::muxRelease(InflationLocks + ix);
-         TEVENT(Inflate: INFLATING - yield/park);
-       }
+      if (its & 1) {
+        os::naked_yield();
+        TEVENT(Inflate: INFLATING - yield);
+      } else {
+        // Note that the following code attenuates the livelock problem but is not
+        // a complete remedy.  A more complete solution would require that the inflating
+        // thread hold the associated inflation lock.  The following code simply restricts
+        // the number of spinners to at most one.  We'll have N-2 threads blocked
+        // on the inflationlock, 1 thread holding the inflation lock and using
+        // a yield/park strategy, and 1 thread in the midst of inflation.
+        // A more refined approach would be to change the encoding of INFLATING
+        // to allow encapsulation of a native thread pointer.  Threads waiting for
+        // inflation to complete would use CAS to push themselves onto a singly linked
+        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
+        // and calling park().  When inflation was complete the thread that accomplished inflation
+        // would detach the list and set the markword to inflated with a single CAS and
+        // then for each thread on the list, set the flag and unpark() the thread.
+        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+        // wakes at most one thread whereas we need to wake the entire list.
+        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
+        int YieldThenBlock = 0;
+        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
+        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
+        Thread::muxAcquire(InflationLocks + ix, "InflationLock");
+        while (obj->mark() == markOopDesc::INFLATING()) {
+          // Beware: NakedYield() is advisory and has almost no effect on some platforms
+          // so we periodically call Self->_ParkEvent->park(1).
+          // We use a mixed spin/yield/block mechanism.
+          if ((YieldThenBlock++) >= 16) {
+            Thread::current()->_ParkEvent->park(1);
+          } else {
+            os::naked_yield();
+          }
+        }
+        Thread::muxRelease(InflationLocks + ix);
+        TEVENT(Inflate: INFLATING - yield/park);
+      }
     } else {
-       SpinPause();       // SMP-polite spinning
+      SpinPause();       // SMP-polite spinning
     }
   }
 }
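
ReadStableMark() above escalates from pure spinning to yielding and finally to parking, with the striped inflation locks capping the number of active spinners. A reduced, portable sketch of that spin/yield/block escalation; the thresholds mirror the code above, while the std::this_thread calls stand in for SpinPause(), os::naked_yield(), and ParkEvent::park(1):

#include <chrono>
#include <thread>

// Hedged sketch of the mixed spin/yield/block wait, with standard C++ in
// place of HotSpot's SpinPause/os::naked_yield/ParkEvent primitives.
template <typename Pred>
void wait_until(Pred done) {
  int its = 0;
  int yieldThenBlock = 0;
  while (!done()) {
    if (++its <= 10000) {
      continue;                        // phase 1: SMP-polite busy spin
    }
    if (yieldThenBlock++ < 16) {
      std::this_thread::yield();       // phase 2: advisory yield
    } else {
      // phase 3: block briefly, like Self->_ParkEvent->park(1)
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }
}
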
@@ -510,45 +510,40 @@
 //   result in hashtable collisions and reduced hashtable efficiency.
 //   There are simple ways to "diffuse" the middle address bits over the
 //   generated hashCode values:
-//
 
 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
   intptr_t value = 0;
   if (hashCode == 0) {
-     // This form uses an unguarded global Park-Miller RNG,
-     // so it's possible for two threads to race and generate the same RNG.
-     // On MP system we'll have lots of RW access to a global, so the
-     // mechanism induces lots of coherency traffic.
-     value = os::random();
-  } else
-  if (hashCode == 1) {
-     // This variation has the property of being stable (idempotent)
-     // between STW operations.  This can be useful in some of the 1-0
-     // synchronization schemes.
-     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
-     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
-  } else
-  if (hashCode == 2) {
-     value = 1;            // for sensitivity testing
-  } else
-  if (hashCode == 3) {
-     value = ++GVars.hcSequence;
-  } else
-  if (hashCode == 4) {
-     value = cast_from_oop<intptr_t>(obj);
+    // This form uses an unguarded global Park-Miller RNG,
+    // so it's possible for two threads to race and generate the same RNG value.
+    // On MP systems we'll have lots of RW access to a global, so the
+    // mechanism induces lots of coherency traffic.
+    value = os::random();
+  } else if (hashCode == 1) {
+    // This variation has the property of being stable (idempotent)
+    // between STW operations.  This can be useful in some of the 1-0
+    // synchronization schemes.
+    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
+    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
+  } else if (hashCode == 2) {
+    value = 1;            // for sensitivity testing
+  } else if (hashCode == 3) {
+    value = ++GVars.hcSequence;
+  } else if (hashCode == 4) {
+    value = cast_from_oop<intptr_t>(obj);
   } else {
-     // Marsaglia's xor-shift scheme with thread-specific state
-     // This is probably the best overall implementation -- we'll
-     // likely make this the default in future releases.
-     unsigned t = Self->_hashStateX;
-     t ^= (t << 11);
-     Self->_hashStateX = Self->_hashStateY;
-     Self->_hashStateY = Self->_hashStateZ;
-     Self->_hashStateZ = Self->_hashStateW;
-     unsigned v = Self->_hashStateW;
-     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
-     Self->_hashStateW = v;
-     value = v;
+    // Marsaglia's xor-shift scheme with thread-specific state
+    // This is probably the best overall implementation -- we'll
+    // likely make this the default in future releases.
+    unsigned t = Self->_hashStateX;
+    t ^= (t << 11);
+    Self->_hashStateX = Self->_hashStateY;
+    Self->_hashStateY = Self->_hashStateZ;
+    Self->_hashStateZ = Self->_hashStateW;
+    unsigned v = Self->_hashStateW;
+    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
+    Self->_hashStateW = v;
+    value = v;
   }
 
   value &= markOopDesc::hash_mask;
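
The final branch of get_next_hash() above is Marsaglia's xor-shift generator over four per-thread state words. A standalone restatement; HashState mirrors the _hashStateX.._hashStateW fields and must be seeded so the words are not all zero:

// Standalone sketch of the thread-local Marsaglia xor-shift used above.
// If all four words are ever zero the generator is stuck at zero forever,
// so the state must be seeded non-zero.
struct HashState { unsigned x, y, z, w; };

unsigned next_hash(HashState* s) {
  unsigned t = s->x;
  t ^= (t << 11);
  s->x = s->y;
  s->y = s->z;
  s->z = s->w;
  unsigned v = s->w;
  v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  s->w = v;          // the new state word doubles as the hash value
  return v;
}
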
@@ -557,8 +552,8 @@
   TEVENT(hashCode: GENERATE);
   return value;
 }
-//
-intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+
+intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
   if (UseBiasedLocking) {
     // NOTE: many places throughout the JVM do not expect a safepoint
     // to be taken here, in particular most operations on perm gen
@@ -572,7 +567,7 @@
       Handle hobj(Self, obj);
       // Relaxing assertion for bug 6320749.
       assert(Universe::verify_in_progress() ||
-              !SafepointSynchronize::is_at_safepoint(),
+             !SafepointSynchronize::is_at_safepoint(),
              "biases should not be seen by VM thread here");
       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
       obj = hobj();
@@ -583,16 +578,16 @@
   // hashCode() is a heap mutator ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
-          !SafepointSynchronize::is_at_safepoint(), "invariant");
+         !SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(Universe::verify_in_progress() ||
-          Self->is_Java_thread() , "invariant");
+         Self->is_Java_thread(), "invariant");
   assert(Universe::verify_in_progress() ||
          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 
   ObjectMonitor* monitor = NULL;
   markOop temp, test;
   intptr_t hash;
-  markOop mark = ReadStableMark (obj);
+  markOop mark = ReadStableMark(obj);
 
   // object should remain ineligible for biased locking
   assert(!mark->has_bias_pattern(), "invariant");
@@ -706,7 +701,7 @@
   // The caller must beware this method can revoke bias, and
   // revocation can result in a safepoint.
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(self->thread_state() != _thread_blocked , "invariant");
+  assert(self->thread_state() != _thread_blocked, "invariant");
 
   // Possible mark states: neutral, biased, stack-locked, inflated
 
@@ -841,7 +836,6 @@
 // --   unassigned and on a thread's private omFreeList
 // --   assigned to an object.  The object is inflated and the mark refers
 //      to the objectmonitor.
-//
 
 
 // Constraining monitor pool growth via MonitorBound ...
@@ -859,9 +853,8 @@
 // See also: GuaranteedSafepointInterval
 //
 // The current implementation uses asynchronous VM operations.
-//
 
-static void InduceScavenge (Thread * Self, const char * Whence) {
+static void InduceScavenge(Thread * Self, const char * Whence) {
   // Induce STW safepoint to trim monitors
   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
   // More precisely, trigger an asynchronous STW safepoint as the number
@@ -886,144 +879,144 @@
   }
 }
 
-void ObjectSynchronizer::verifyInUse (Thread *Self) {
-   ObjectMonitor* mid;
-   int inusetally = 0;
-   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
-     inusetally++;
-   }
-   assert(inusetally == Self->omInUseCount, "inuse count off");
+void ObjectSynchronizer::verifyInUse(Thread *Self) {
+  ObjectMonitor* mid;
+  int inusetally = 0;
+  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
+    inusetally++;
+  }
+  assert(inusetally == Self->omInUseCount, "inuse count off");
 
-   int freetally = 0;
-   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
-     freetally++;
-   }
-   assert(freetally == Self->omFreeCount, "free count off");
+  int freetally = 0;
+  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
+    freetally++;
+  }
+  assert(freetally == Self->omFreeCount, "free count off");
 }
 
-ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc (Thread * Self) {
-    // A large MAXPRIVATE value reduces both list lock contention
-    // and list coherency traffic, but also tends to increase the
-    // number of objectMonitors in circulation as well as the STW
-    // scavenge costs.  As usual, we lean toward time in space-time
-    // tradeoffs.
-    const int MAXPRIVATE = 1024;
-    for (;;) {
-        ObjectMonitor * m;
+ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
+  // A large MAXPRIVATE value reduces both list lock contention
+  // and list coherency traffic, but also tends to increase the
+  // number of objectMonitors in circulation as well as the STW
+  // scavenge costs.  As usual, we lean toward time in space-time
+  // tradeoffs.
+  const int MAXPRIVATE = 1024;
+  for (;;) {
+    ObjectMonitor * m;
 
-        // 1: try to allocate from the thread's local omFreeList.
-        // Threads will attempt to allocate first from their local list, then
-        // from the global list, and only after those attempts fail will the thread
-        // attempt to instantiate new monitors.   Thread-local free lists take
-        // heat off the ListLock and improve allocation latency, as well as reducing
-        // coherency traffic on the shared global list.
-        m = Self->omFreeList;
-        if (m != NULL) {
-           Self->omFreeList = m->FreeNext;
-           Self->omFreeCount--;
-           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
-           guarantee(m->object() == NULL, "invariant");
-           if (MonitorInUseLists) {
-             m->FreeNext = Self->omInUseList;
-             Self->omInUseList = m;
-             Self->omInUseCount++;
-             if (ObjectMonitor::Knob_VerifyInUse) {
-               verifyInUse(Self);
-             }
-           } else {
-             m->FreeNext = NULL;
-           }
-           return m;
+    // 1: try to allocate from the thread's local omFreeList.
+    // Threads will attempt to allocate first from their local list, then
+    // from the global list, and only after those attempts fail will the thread
+    // attempt to instantiate new monitors.   Thread-local free lists take
+    // heat off the ListLock and improve allocation latency, as well as reducing
+    // coherency traffic on the shared global list.
+    m = Self->omFreeList;
+    if (m != NULL) {
+      Self->omFreeList = m->FreeNext;
+      Self->omFreeCount--;
+      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
+      guarantee(m->object() == NULL, "invariant");
+      if (MonitorInUseLists) {
+        m->FreeNext = Self->omInUseList;
+        Self->omInUseList = m;
+        Self->omInUseCount++;
+        if (ObjectMonitor::Knob_VerifyInUse) {
+          verifyInUse(Self);
         }
+      } else {
+        m->FreeNext = NULL;
+      }
+      return m;
+    }
 
-        // 2: try to allocate from the global gFreeList
-        // CONSIDER: use muxTry() instead of muxAcquire().
-        // If the muxTry() fails then drop immediately into case 3.
-        // If we're using thread-local free lists then try
-        // to reprovision the caller's free list.
-        if (gFreeList != NULL) {
-            // Reprovision the thread's omFreeList.
-            // Use bulk transfers to reduce the allocation rate and heat
-            // on various locks.
-            Thread::muxAcquire(&ListLock, "omAlloc");
-            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
-                MonitorFreeCount--;
-                ObjectMonitor * take = gFreeList;
-                gFreeList = take->FreeNext;
-                guarantee(take->object() == NULL, "invariant");
-                guarantee(!take->is_busy(), "invariant");
-                take->Recycle();
-                omRelease(Self, take, false);
-            }
-            Thread::muxRelease(&ListLock);
-            Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
-            if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
-            TEVENT(omFirst - reprovision);
+    // 2: try to allocate from the global gFreeList
+    // CONSIDER: use muxTry() instead of muxAcquire().
+    // If the muxTry() fails then drop immediately into case 3.
+    // If we're using thread-local free lists then try
+    // to reprovision the caller's free list.
+    if (gFreeList != NULL) {
+      // Reprovision the thread's omFreeList.
+      // Use bulk transfers to reduce the allocation rate and heat
+      // on various locks.
+      Thread::muxAcquire(&ListLock, "omAlloc");
+      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
+        MonitorFreeCount--;
+        ObjectMonitor * take = gFreeList;
+        gFreeList = take->FreeNext;
+        guarantee(take->object() == NULL, "invariant");
+        guarantee(!take->is_busy(), "invariant");
+        take->Recycle();
+        omRelease(Self, take, false);
+      }
+      Thread::muxRelease(&ListLock);
+      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
+      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
+      TEVENT(omFirst - reprovision);
 
-            const int mx = MonitorBound;
-            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
-              // We can't safely induce a STW safepoint from omAlloc() as our thread
-              // state may not be appropriate for such activities and callers may hold
-              // naked oops, so instead we defer the action.
-              InduceScavenge(Self, "omAlloc");
-            }
-            continue;
-        }
+      const int mx = MonitorBound;
+      if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
+        // We can't safely induce a STW safepoint from omAlloc() as our thread
+        // state may not be appropriate for such activities and callers may hold
+        // naked oops, so instead we defer the action.
+        InduceScavenge(Self, "omAlloc");
+      }
+      continue;
+    }
 
-        // 3: allocate a block of new ObjectMonitors
-        // Both the local and global free lists are empty -- resort to malloc().
-        // In the current implementation objectMonitors are TSM - immortal.
-        assert(_BLOCKSIZE > 1, "invariant");
-        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
+    // 3: allocate a block of new ObjectMonitors
+    // Both the local and global free lists are empty -- resort to malloc().
+    // In the current implementation objectMonitors are TSM - immortal.
+    assert(_BLOCKSIZE > 1, "invariant");
+    ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
 
-        // NOTE: (almost) no way to recover if allocation failed.
-        // We might be able to induce a STW safepoint and scavenge enough
-        // objectMonitors to permit progress.
-        if (temp == NULL) {
-            vm_exit_out_of_memory(sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
-                                   "Allocate ObjectMonitors");
-        }
+    // NOTE: (almost) no way to recover if allocation failed.
+    // We might be able to induce a STW safepoint and scavenge enough
+    // objectMonitors to permit progress.
+    if (temp == NULL) {
+      vm_exit_out_of_memory(sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
+                            "Allocate ObjectMonitors");
+    }
 
-        // Format the block.
-        // initialize the linked list, each monitor points to its next
-        // forming the single linked free list, the very first monitor
-        // will points to next block, which forms the block list.
-        // The trick of using the 1st element in the block as gBlockList
-        // linkage should be reconsidered.  A better implementation would
-        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
+    // Format the block.
+    // Initialize the linked list: each monitor points to its successor,
+    // forming the singly linked free list; the very first monitor
+    // will point to the next block, which forms the block list.
+    // The trick of using the 1st element in the block as gBlockList
+    // linkage should be reconsidered.  A better implementation would
+    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
 
-        for (int i = 1; i < _BLOCKSIZE; i++) {
-           temp[i].FreeNext = &temp[i+1];
-        }
+    for (int i = 1; i < _BLOCKSIZE; i++) {
+      temp[i].FreeNext = &temp[i+1];
+    }
 
-        // terminate the last monitor as the end of list
-        temp[_BLOCKSIZE - 1].FreeNext = NULL;
+    // terminate the last monitor as the end of list
+    temp[_BLOCKSIZE - 1].FreeNext = NULL;
 
-        // Element [0] is reserved for global list linkage
-        temp[0].set_object(CHAINMARKER);
+    // Element [0] is reserved for global list linkage
+    temp[0].set_object(CHAINMARKER);
 
-        // Consider carving out this thread's current request from the
-        // block in hand.  This avoids some lock traffic and redundant
-        // list activity.
+    // Consider carving out this thread's current request from the
+    // block in hand.  This avoids some lock traffic and redundant
+    // list activity.
 
-        // Acquire the ListLock to manipulate BlockList and FreeList.
-        // An Oyama-Taura-Yonezawa scheme might be more efficient.
-        Thread::muxAcquire(&ListLock, "omAlloc [2]");
-        MonitorPopulation += _BLOCKSIZE-1;
-        MonitorFreeCount += _BLOCKSIZE-1;
+    // Acquire the ListLock to manipulate BlockList and FreeList.
+    // An Oyama-Taura-Yonezawa scheme might be more efficient.
+    Thread::muxAcquire(&ListLock, "omAlloc [2]");
+    MonitorPopulation += _BLOCKSIZE-1;
+    MonitorFreeCount += _BLOCKSIZE-1;
 
-        // Add the new block to the list of extant blocks (gBlockList).
-        // The very first objectMonitor in a block is reserved and dedicated.
-        // It serves as blocklist "next" linkage.
-        temp[0].FreeNext = gBlockList;
-        gBlockList = temp;
+    // Add the new block to the list of extant blocks (gBlockList).
+    // The very first objectMonitor in a block is reserved and dedicated.
+    // It serves as blocklist "next" linkage.
+    temp[0].FreeNext = gBlockList;
+    gBlockList = temp;
 
-        // Add the new string of objectMonitors to the global free list
-        temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
-        gFreeList = temp + 1;
-        Thread::muxRelease(&ListLock);
-        TEVENT(Allocate block of monitors);
-    }
+    // Add the new string of objectMonitors to the global free list
+    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
+    gFreeList = temp + 1;
+    Thread::muxRelease(&ListLock);
+    TEVENT(Allocate block of monitors);
+  }
 }
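
omAlloc() above allocates in three tiers: the thread-local free list, a bulk refill from the global free list taken under ListLock, and finally a freshly allocated block of immortal monitors. A simplified model of that strategy; Node, the 32-element transfer batch, BLOCK, and the std::mutex are illustrative stand-ins for ObjectMonitor, omFreeProvision, _BLOCKSIZE, and muxAcquire/muxRelease:

#include <mutex>

struct Node { Node* next; };

static std::mutex         gListLock;
static Node*              gFreeList = nullptr;
static thread_local Node* localFreeList = nullptr;

Node* alloc_node() {
  for (;;) {
    // 1: thread-local fast path -- no shared-state traffic
    if (Node* m = localFreeList) {
      localFreeList = m->next;
      return m;
    }
    // 2: bulk-refill the local list from the global list under the lock
    {
      std::lock_guard<std::mutex> g(gListLock);
      for (int i = 0; i < 32 && gFreeList != nullptr; i++) {
        Node* take = gFreeList;
        gFreeList = take->next;
        take->next = localFreeList;
        localFreeList = take;
      }
    }
    if (localFreeList != nullptr) continue;   // retry the fast path
    // 3: both lists empty -- carve a new block, never freed, like the
    // "immortal" objectMonitor blocks above
    const int BLOCK = 128;
    Node* blk = new Node[BLOCK];
    for (int i = 0; i < BLOCK - 1; i++) blk[i].next = &blk[i + 1];
    blk[BLOCK - 1].next = nullptr;
    localFreeList = blk;
  }
}

Bulk transfers are the key design choice: they amortize one lock acquisition over many allocations, which is what takes "heat off the ListLock" in the comments above.
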
 
 // Place "m" on the caller's private per-thread omFreeList.
@@ -1032,30 +1025,30 @@
 // omRelease is to return a monitor to the free list after a CAS
 // attempt failed.  This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
-//
 
-void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
-    guarantee(m->object() == NULL, "invariant");
+void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
+                                   bool fromPerThreadAlloc) {
+  guarantee(m->object() == NULL, "invariant");
 
-    // Remove from omInUseList
-    if (MonitorInUseLists && fromPerThreadAlloc) {
-      ObjectMonitor* curmidinuse = NULL;
-      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
-       if (m == mid) {
-         // extract from per-thread in-use-list
-         if (mid == Self->omInUseList) {
-           Self->omInUseList = mid->FreeNext;
-         } else if (curmidinuse != NULL) {
-           curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
-         }
-         Self->omInUseCount--;
-         if (ObjectMonitor::Knob_VerifyInUse) {
-           verifyInUse(Self);
-         }
-         break;
-       } else {
-         curmidinuse = mid;
-         mid = mid->FreeNext;
+  // Remove from omInUseList
+  if (MonitorInUseLists && fromPerThreadAlloc) {
+    ObjectMonitor* curmidinuse = NULL;
+    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
+      if (m == mid) {
+        // extract from per-thread in-use-list
+        if (mid == Self->omInUseList) {
+          Self->omInUseList = mid->FreeNext;
+        } else if (curmidinuse != NULL) {
+          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+        }
+        Self->omInUseCount--;
+        if (ObjectMonitor::Knob_VerifyInUse) {
+          verifyInUse(Self);
+        }
+        break;
+      } else {
+        curmidinuse = mid;
+        mid = mid->FreeNext;
       }
     }
   }
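
omRelease() above unlinks a monitor from the per-thread in-use list by walking with a trailing pointer so the predecessor can be respliced. The same splice in isolation, with an illustrative Node type:

struct Node { Node* next; };

// Walk with a trailing prev pointer so the predecessor's next field can
// be spliced when the target is found; the head is the special case.
void unlink(Node** head, Node* target) {
  Node* prev = nullptr;
  for (Node* cur = *head; cur != nullptr; prev = cur, cur = cur->next) {
    if (cur == target) {
      if (prev == nullptr) {
        *head = cur->next;          // target was the list head
      } else {
        prev->next = cur->next;     // bypass target mid-list
      }
      return;
    }
  }
}
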
@@ -1086,54 +1079,54 @@
 // be not inopportune interleavings between omFlush() and the scavenge
 // operator.
 
-void ObjectSynchronizer::omFlush (Thread * Self) {
-    ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
-    Self->omFreeList = NULL;
-    ObjectMonitor * Tail = NULL;
-    int Tally = 0;
-    if (List != NULL) {
-      ObjectMonitor * s;
-      for (s = List; s != NULL; s = s->FreeNext) {
-          Tally++;
-          Tail = s;
-          guarantee(s->object() == NULL, "invariant");
-          guarantee(!s->is_busy(), "invariant");
-          s->set_owner(NULL);   // redundant but good hygiene
-          TEVENT(omFlush - Move one);
-      }
-      guarantee(Tail != NULL && List != NULL, "invariant");
+void ObjectSynchronizer::omFlush(Thread * Self) {
+  ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
+  Self->omFreeList = NULL;
+  ObjectMonitor * Tail = NULL;
+  int Tally = 0;
+  if (List != NULL) {
+    ObjectMonitor * s;
+    for (s = List; s != NULL; s = s->FreeNext) {
+      Tally++;
+      Tail = s;
+      guarantee(s->object() == NULL, "invariant");
+      guarantee(!s->is_busy(), "invariant");
+      s->set_owner(NULL);   // redundant but good hygiene
+      TEVENT(omFlush - Move one);
     }
+    guarantee(Tail != NULL && List != NULL, "invariant");
+  }
 
-    ObjectMonitor * InUseList = Self->omInUseList;
-    ObjectMonitor * InUseTail = NULL;
-    int InUseTally = 0;
-    if (InUseList != NULL) {
-      Self->omInUseList = NULL;
-      ObjectMonitor *curom;
-      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
-        InUseTail = curom;
-        InUseTally++;
-      }
-      assert(Self->omInUseCount == InUseTally, "inuse count off");
-      Self->omInUseCount = 0;
-      guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
+  ObjectMonitor * InUseList = Self->omInUseList;
+  ObjectMonitor * InUseTail = NULL;
+  int InUseTally = 0;
+  if (InUseList != NULL) {
+    Self->omInUseList = NULL;
+    ObjectMonitor *curom;
+    for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
+      InUseTail = curom;
+      InUseTally++;
     }
+    assert(Self->omInUseCount == InUseTally, "inuse count off");
+    Self->omInUseCount = 0;
+    guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
+  }
 
-    Thread::muxAcquire(&ListLock, "omFlush");
-    if (Tail != NULL) {
-      Tail->FreeNext = gFreeList;
-      gFreeList = List;
-      MonitorFreeCount += Tally;
-    }
+  Thread::muxAcquire(&ListLock, "omFlush");
+  if (Tail != NULL) {
+    Tail->FreeNext = gFreeList;
+    gFreeList = List;
+    MonitorFreeCount += Tally;
+  }
 
-    if (InUseTail != NULL) {
-      InUseTail->FreeNext = gOmInUseList;
-      gOmInUseList = InUseList;
-      gOmInUseCount += InUseTally;
-    }
+  if (InUseTail != NULL) {
+    InUseTail->FreeNext = gOmInUseList;
+    gOmInUseList = InUseList;
+    gOmInUseCount += InUseTally;
+  }
 
-    Thread::muxRelease(&ListLock);
-    TEVENT(omFlush);
+  Thread::muxRelease(&ListLock);
+  TEVENT(omFlush);
 }
 
 // Fast path code shared by multiple functions
@@ -1152,193 +1145,194 @@
 // multiple locks occupy the same $ line.  Padding might be appropriate.
 
 
-ObjectMonitor * NOINLINE ObjectSynchronizer::inflate (Thread * Self, oop object) {
+ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
+                                                     oop object) {
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
-          !SafepointSynchronize::is_at_safepoint(), "invariant");
+         !SafepointSynchronize::is_at_safepoint(), "invariant");
 
   for (;;) {
-      const markOop mark = object->mark();
-      assert(!mark->has_bias_pattern(), "invariant");
+    const markOop mark = object->mark();
+    assert(!mark->has_bias_pattern(), "invariant");
+
+    // The mark can be in one of the following states:
+    // *  Inflated     - just return
+    // *  Stack-locked - coerce it to inflated
+    // *  INFLATING    - busy wait for conversion to complete
+    // *  Neutral      - aggressively inflate the object.
+    // *  BIASED       - Illegal.  We should never see this
 
-      // The mark can be in one of the following states:
-      // *  Inflated     - just return
-      // *  Stack-locked - coerce it to inflated
-      // *  INFLATING    - busy wait for conversion to complete
-      // *  Neutral      - aggressively inflate the object.
-      // *  BIASED       - Illegal.  We should never see this
+    // CASE: inflated
+    if (mark->has_monitor()) {
+      ObjectMonitor * inf = mark->monitor();
+      assert(inf->header()->is_neutral(), "invariant");
+      assert(inf->object() == object, "invariant");
+      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
+      return inf;
+    }
+
+    // CASE: inflation in progress - inflating over a stack-lock.
+    // Some other thread is converting from stack-locked to inflated.
+    // Only that thread can complete inflation -- other threads must wait.
+    // The INFLATING value is transient.
+    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
+    // We could always eliminate polling by parking the thread on some auxiliary list.
+    if (mark == markOopDesc::INFLATING()) {
+      TEVENT(Inflate: spin while INFLATING);
+      ReadStableMark(object);
+      continue;
+    }
 
-      // CASE: inflated
-      if (mark->has_monitor()) {
-          ObjectMonitor * inf = mark->monitor();
-          assert(inf->header()->is_neutral(), "invariant");
-          assert(inf->object() == object, "invariant");
-          assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
-          return inf;
-      }
+    // CASE: stack-locked
+    // Could be stack-locked either by this thread or by some other thread.
+    //
+    // Note that we allocate the objectmonitor speculatively, _before_ attempting
+    // to install INFLATING into the mark word.  We originally installed INFLATING,
+    // allocated the objectmonitor, and then finally STed the address of the
+    // objectmonitor into the mark.  This was correct, but artificially lengthened
+    // the interval in which INFLATED appeared in the mark, thus increasing
+    // the odds of inflation contention.
+    //
+    // We now use per-thread private objectmonitor free lists.
+    // These lists are reprovisioned from the global free list outside the
+    // critical INFLATING...ST interval.  A thread can transfer
+    // multiple objectmonitors en masse from the global free list to its local free list.
+    // This reduces coherency traffic and lock contention on the global free list.
+    // Using such local free lists, it doesn't matter if the omAlloc() call appears
+    // before or after the CAS(INFLATING) operation.
+    // See the comments in omAlloc().
 
-      // CASE: inflation in progress - inflating over a stack-lock.
-      // Some other thread is converting from stack-locked to inflated.
-      // Only that thread can complete inflation -- other threads must wait.
-      // The INFLATING value is transient.
-      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
-      // We could always eliminate polling by parking the thread on some auxiliary list.
-      if (mark == markOopDesc::INFLATING()) {
-         TEVENT(Inflate: spin while INFLATING);
-         ReadStableMark(object);
-         continue;
+    if (mark->has_locker()) {
+      ObjectMonitor * m = omAlloc(Self);
+      // Optimistically prepare the objectmonitor - anticipate successful CAS
+      // We do this before the CAS in order to minimize the length of time
+      // in which INFLATING appears in the mark.
+      m->Recycle();
+      m->_Responsible  = NULL;
+      m->OwnerIsThread = 0;
+      m->_recursions   = 0;
+      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
+
+      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
+      if (cmp != mark) {
+        omRelease(Self, m, true);
+        continue;       // Interference -- just retry
       }
 
-      // CASE: stack-locked
-      // Could be stack-locked either by this thread or by some other thread.
-      //
-      // Note that we allocate the objectmonitor speculatively, _before_ attempting
-      // to install INFLATING into the mark word.  We originally installed INFLATING,
-      // allocated the objectmonitor, and then finally STed the address of the
-      // objectmonitor into the mark.  This was correct, but artificially lengthened
-      // the interval in which INFLATED appeared in the mark, thus increasing
-      // the odds of inflation contention.
+      // We've successfully installed INFLATING (0) into the mark-word.
+      // This is the only case where 0 will appear in a mark-word.
+      // Only the singular thread that successfully swings the mark-word
+      // to 0 can perform (or more precisely, complete) inflation.
       //
-      // We now use per-thread private objectmonitor free lists.
-      // These list are reprovisioned from the global free list outside the
-      // critical INFLATING...ST interval.  A thread can transfer
-      // multiple objectmonitors en-mass from the global free list to its local free list.
-      // This reduces coherency traffic and lock contention on the global free list.
-      // Using such local free lists, it doesn't matter if the omAlloc() call appears
-      // before or after the CAS(INFLATING) operation.
-      // See the comments in omAlloc().
-
-      if (mark->has_locker()) {
-          ObjectMonitor * m = omAlloc(Self);
-          // Optimistically prepare the objectmonitor - anticipate successful CAS
-          // We do this before the CAS in order to minimize the length of time
-          // in which INFLATING appears in the mark.
-          m->Recycle();
-          m->_Responsible  = NULL;
-          m->OwnerIsThread = 0;
-          m->_recursions   = 0;
-          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
-
-          markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
-          if (cmp != mark) {
-             omRelease(Self, m, true);
-             continue;       // Interference -- just retry
-          }
-
-          // We've successfully installed INFLATING (0) into the mark-word.
-          // This is the only case where 0 will appear in a mark-work.
-          // Only the singular thread that successfully swings the mark-word
-          // to 0 can perform (or more precisely, complete) inflation.
-          //
-          // Why do we CAS a 0 into the mark-word instead of just CASing the
-          // mark-word from the stack-locked value directly to the new inflated state?
-          // Consider what happens when a thread unlocks a stack-locked object.
-          // It attempts to use CAS to swing the displaced header value from the
-          // on-stack basiclock back into the object header.  Recall also that the
-          // header value (hashcode, etc) can reside in (a) the object header, or
-          // (b) a displaced header associated with the stack-lock, or (c) a displaced
-          // header in an objectMonitor.  The inflate() routine must copy the header
-          // value from the basiclock on the owner's stack to the objectMonitor, all
-          // the while preserving the hashCode stability invariants.  If the owner
-          // decides to release the lock while the value is 0, the unlock will fail
-          // and control will eventually pass from slow_exit() to inflate.  The owner
-          // will then spin, waiting for the 0 value to disappear.   Put another way,
-          // the 0 causes the owner to stall if the owner happens to try to
-          // drop the lock (restoring the header from the basiclock to the object)
-          // while inflation is in-progress.  This protocol avoids races that might
-          // would otherwise permit hashCode values to change or "flicker" for an object.
-          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
-          // 0 serves as a "BUSY" inflate-in-progress indicator.
+      // Why do we CAS a 0 into the mark-word instead of just CASing the
+      // mark-word from the stack-locked value directly to the new inflated state?
+      // Consider what happens when a thread unlocks a stack-locked object.
+      // It attempts to use CAS to swing the displaced header value from the
+      // on-stack basiclock back into the object header.  Recall also that the
+      // header value (hashcode, etc) can reside in (a) the object header, or
+      // (b) a displaced header associated with the stack-lock, or (c) a displaced
+      // header in an objectMonitor.  The inflate() routine must copy the header
+      // value from the basiclock on the owner's stack to the objectMonitor, all
+      // the while preserving the hashCode stability invariants.  If the owner
+      // decides to release the lock while the value is 0, the unlock will fail
+      // and control will eventually pass from slow_exit() to inflate.  The owner
+      // will then spin, waiting for the 0 value to disappear.   Put another way,
+      // the 0 causes the owner to stall if the owner happens to try to
+      // drop the lock (restoring the header from the basiclock to the object)
+      // while inflation is in-progress.  This protocol avoids races that
+      // would otherwise permit hashCode values to change or "flicker" for an object.
+      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+      // 0 serves as a "BUSY" inflate-in-progress indicator.
 
 
-          // fetch the displaced mark from the owner's stack.
-          // The owner can't die or unwind past the lock while our INFLATING
-          // object is in the mark.  Furthermore the owner can't complete
-          // an unlock on the object, either.
-          markOop dmw = mark->displaced_mark_helper();
-          assert(dmw->is_neutral(), "invariant");
-
-          // Setup monitor fields to proper values -- prepare the monitor
-          m->set_header(dmw);
-
-          // Optimization: if the mark->locker stack address is associated
-          // with this thread we could simply set m->_owner = Self and
-          // m->OwnerIsThread = 1. Note that a thread can inflate an object
-          // that it has stack-locked -- as might happen in wait() -- directly
-          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
-          m->set_owner(mark->locker());
-          m->set_object(object);
-          // TODO-FIXME: assert BasicLock->dhw != 0.
+      // fetch the displaced mark from the owner's stack.
+      // The owner can't die or unwind past the lock while our INFLATING
+      // object is in the mark.  Furthermore the owner can't complete
+      // an unlock on the object, either.
+      markOop dmw = mark->displaced_mark_helper();
+      assert(dmw->is_neutral(), "invariant");
 
-          // Must preserve store ordering. The monitor state must
-          // be stable at the time of publishing the monitor address.
-          guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
-          object->release_set_mark(markOopDesc::encode(m));
-
-          // Hopefully the performance counters are allocated on distinct cache lines
-          // to avoid false sharing on MP systems ...
-          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
-          TEVENT(Inflate: overwrite stacklock);
-          if (TraceMonitorInflation) {
-            if (object->is_instance()) {
-              ResourceMark rm;
-              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (void *) object, (intptr_t) object->mark(),
-                object->klass()->external_name());
-            }
-          }
-          return m;
-      }
+      // Setup monitor fields to proper values -- prepare the monitor
+      m->set_header(dmw);
 
-      // CASE: neutral
-      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
-      // If we know we're inflating for entry it's better to inflate by swinging a
-      // pre-locked objectMonitor pointer into the object header.   A successful
-      // CAS inflates the object *and* confers ownership to the inflating thread.
-      // In the current implementation we use a 2-step mechanism where we CAS()
-      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
-      // An inflateTry() method that we could call from fast_enter() and slow_enter()
-      // would be useful.
-
-      assert(mark->is_neutral(), "invariant");
-      ObjectMonitor * m = omAlloc(Self);
-      // prepare m for installation - set monitor to initial state
-      m->Recycle();
-      m->set_header(mark);
-      m->set_owner(NULL);
+      // Optimization: if the mark->locker stack address is associated
+      // with this thread we could simply set m->_owner = Self and
+      // m->OwnerIsThread = 1. Note that a thread can inflate an object
+      // that it has stack-locked -- as might happen in wait() -- directly
+      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
+      m->set_owner(mark->locker());
       m->set_object(object);
-      m->OwnerIsThread = 1;
-      m->_recursions   = 0;
-      m->_Responsible  = NULL;
-      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
+      // TODO-FIXME: assert BasicLock->dhw != 0.
 
-      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
-          m->set_object(NULL);
-          m->set_owner(NULL);
-          m->OwnerIsThread = 0;
-          m->Recycle();
-          omRelease(Self, m, true);
-          m = NULL;
-          continue;
-          // interference - the markword changed - just retry.
-          // The state-transitions are one-way, so there's no chance of
-          // live-lock -- "Inflated" is an absorbing state.
-      }
+      // Must preserve store ordering. The monitor state must
+      // be stable at the time of publishing the monitor address.
+      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
+      object->release_set_mark(markOopDesc::encode(m));
 
-      // Hopefully the performance counters are allocated on distinct
-      // cache lines to avoid false sharing on MP systems ...
+      // Hopefully the performance counters are allocated on distinct cache lines
+      // to avoid false sharing on MP systems ...
       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
-      TEVENT(Inflate: overwrite neutral);
+      TEVENT(Inflate: overwrite stacklock);
       if (TraceMonitorInflation) {
         if (object->is_instance()) {
           ResourceMark rm;
           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-            (void *) object, (intptr_t) object->mark(),
-            object->klass()->external_name());
+                        (void *) object, (intptr_t) object->mark(),
+                        object->klass()->external_name());
         }
       }
       return m;
+    }
+
+    // CASE: neutral
+    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
+    // If we know we're inflating for entry it's better to inflate by swinging a
+    // pre-locked objectMonitor pointer into the object header.   A successful
+    // CAS inflates the object *and* confers ownership to the inflating thread.
+    // In the current implementation we use a 2-step mechanism where we CAS()
+    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
+    // An inflateTry() method that we could call from fast_enter() and slow_enter()
+    // would be useful.
+
+    assert(mark->is_neutral(), "invariant");
+    ObjectMonitor * m = omAlloc(Self);
+    // prepare m for installation - set monitor to initial state
+    m->Recycle();
+    m->set_header(mark);
+    m->set_owner(NULL);
+    m->set_object(object);
+    m->OwnerIsThread = 1;
+    m->_recursions   = 0;
+    m->_Responsible  = NULL;
+    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
+
+    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
+      m->set_object(NULL);
+      m->set_owner(NULL);
+      m->OwnerIsThread = 0;
+      m->Recycle();
+      omRelease(Self, m, true);
+      m = NULL;
+      continue;
+      // interference - the markword changed - just retry.
+      // The state-transitions are one-way, so there's no chance of
+      // live-lock -- "Inflated" is an absorbing state.
+    }
+
+    // Hopefully the performance counters are allocated on distinct
+    // cache lines to avoid false sharing on MP systems ...
+    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
+    TEVENT(Inflate: overwrite neutral);
+    if (TraceMonitorInflation) {
+      if (object->is_instance()) {
+        ResourceMark rm;
+        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+                      (void *) object, (intptr_t) object->mark(),
+                      object->klass()->external_name());
+      }
+    }
+    return m;
   }
 }
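
inflate() above claims the mark word by CASing in the transient INFLATING (0) sentinel, prepares the monitor privately while every other thread is held off, and then publishes the monitor address with a release store; readers that observe 0 wait in ReadStableMark(). The shape of that protocol in std::atomic terms; all names are illustrative, and the real code additionally preserves the displaced-header and hashCode invariants discussed above:

#include <atomic>
#include <cstdint>

struct Monitor { std::uintptr_t displaced_header; };

static std::atomic<std::uintptr_t> markWord;   // 0 reserved as BUSY sentinel

// Returns the installed monitor, or nullptr on interference (caller retries).
Monitor* inflate_sketch(std::uintptr_t oldMark, Monitor* m) {
  std::uintptr_t expected = oldMark;
  // Phase 1: swing the word to the transient BUSY (0) value.
  if (!markWord.compare_exchange_strong(expected, 0)) {
    return nullptr;                            // mark changed underneath us
  }
  // Phase 2: while the word is 0 no other thread can complete the
  // transition, so the displaced state can be copied over race-free.
  m->displaced_header = oldMark;
  // Publish with release semantics so m's fields are visible before the
  // new mark value -- the analogue of object->release_set_mark(...).
  markWord.store(reinterpret_cast<std::uintptr_t>(m),
                 std::memory_order_release);
  return m;
}
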
 
@@ -1373,17 +1367,17 @@
 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
 // which in turn can mean large(r) numbers of objectmonitors in circulation.
 // This is an unfortunate aspect of this design.
-//
 
 enum ManifestConstants {
-    ClearResponsibleAtSTW   = 0,
-    MaximumRecheckInterval  = 1000
+  ClearResponsibleAtSTW   = 0,
+  MaximumRecheckInterval  = 1000
 };
 
 // Deflate a single monitor if not in use
 // Return true if deflated, false if in use
 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
-                                         ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
+                                         ObjectMonitor** freeHeadp,
+                                         ObjectMonitor** freeTailp) {
   bool deflated;
   // Normal case ... The monitor is associated with obj.
   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
@@ -1391,68 +1385,69 @@
   guarantee(mid->header()->is_neutral(), "invariant");
 
   if (mid->is_busy()) {
-     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
-     deflated = false;
+    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
+    deflated = false;
   } else {
-     // Deflate the monitor if it is no longer being used
-     // It's idle - scavenge and return to the global free list
-     // plain old deflation ...
-     TEVENT(deflate_idle_monitors - scavenge1);
-     if (TraceMonitorInflation) {
-       if (obj->is_instance()) {
-         ResourceMark rm;
-           tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
-       }
-     }
+    // Deflate the monitor if it is no longer being used
+    // It's idle - scavenge and return to the global free list
+    // plain old deflation ...
+    TEVENT(deflate_idle_monitors - scavenge1);
+    if (TraceMonitorInflation) {
+      if (obj->is_instance()) {
+        ResourceMark rm;
+        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
+      }
+    }
 
-     // Restore the header back to obj
-     obj->release_set_mark(mid->header());
-     mid->clear();
+    // Restore the header back to obj
+    obj->release_set_mark(mid->header());
+    mid->clear();
 
-     assert(mid->object() == NULL, "invariant");
+    assert(mid->object() == NULL, "invariant");
 
-     // Move the object to the working free list defined by FreeHead,FreeTail.
-     if (*freeHeadp == NULL) *freeHeadp = mid;
-     if (*freeTailp != NULL) {
-       ObjectMonitor * prevtail = *freeTailp;
-       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
-       prevtail->FreeNext = mid;
-      }
-     *freeTailp = mid;
-     deflated = true;
+    // Move the object to the working free list defined by FreeHead,FreeTail.
+    if (*freeHeadp == NULL) *freeHeadp = mid;
+    if (*freeTailp != NULL) {
+      ObjectMonitor * prevtail = *freeTailp;
+      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
+      prevtail->FreeNext = mid;
+    }
+    *freeTailp = mid;
+    deflated = true;
   }
   return deflated;
 }
 
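Because deflate_monitor runs at a safepoint, no CAS is needed: the displaced header is stored back into the object and the idle monitor is appended to the working segment bounded by FreeHead/FreeTail. A hedged, standalone sketch of that append, where Monitor and FreeNext mirror the ObjectMonitor fields in shape only:

  struct Monitor { Monitor* FreeNext = nullptr; };

  // Append mid to the segment [*headp, *tailp]; single-threaded by design,
  // mirroring the safepoint-only call path above.
  void append_to_segment(Monitor** headp, Monitor** tailp, Monitor* mid) {
    if (*headp == nullptr) *headp = mid;              // first element
    if (*tailp != nullptr) (*tailp)->FreeNext = mid;  // old tail -> mid
    *tailp = mid;                                     // mid is the new tail
  }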
 // Caller acquires ListLock
 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
-                                          ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
+                                          ObjectMonitor** freeHeadp,
+                                          ObjectMonitor** freeTailp) {
   ObjectMonitor* mid;
   ObjectMonitor* next;
   ObjectMonitor* curmidinuse = NULL;
   int deflatedcount = 0;
 
   for (mid = *listheadp; mid != NULL;) {
-     oop obj = (oop) mid->object();
-     bool deflated = false;
-     if (obj != NULL) {
-       deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
-     }
-     if (deflated) {
-       // extract from per-thread in-use-list
-       if (mid == *listheadp) {
-         *listheadp = mid->FreeNext;
-       } else if (curmidinuse != NULL) {
-         curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
-       }
-       next = mid->FreeNext;
-       mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
-       mid = next;
-       deflatedcount++;
-     } else {
-       curmidinuse = mid;
-       mid = mid->FreeNext;
+    oop obj = (oop) mid->object();
+    bool deflated = false;
+    if (obj != NULL) {
+      deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
+    }
+    if (deflated) {
+      // extract from per-thread in-use-list
+      if (mid == *listheadp) {
+        *listheadp = mid->FreeNext;
+      } else if (curmidinuse != NULL) {
+        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+      }
+      next = mid->FreeNext;
+      mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
+      mid = next;
+      deflatedcount++;
+    } else {
+      curmidinuse = mid;
+      mid = mid->FreeNext;
     }
   }
   return deflatedcount;
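walk_monitor_list above uses the classic trailing-pointer pattern (curmidinuse) so a deflated node can be spliced out of a singly linked list in a single pass. A standalone sketch with illustrative names:

  struct Node { Node* next = nullptr; bool idle = false; };

  // Remove every idle node in one pass; prev trails cur so unlinking never
  // needs a doubly linked list. Returns the number of nodes removed.
  int scavenge(Node** headp) {
    int removed = 0;
    Node* prev = nullptr;
    for (Node* cur = *headp; cur != nullptr; ) {
      Node* nxt = cur->next;
      if (cur->idle) {
        if (cur == *headp) *headp = nxt;   // unlink the head
        else prev->next = nxt;             // bypass cur
        cur->next = nullptr;               // cur now ends the free segment
        ++removed;
      } else {
        prev = cur;
      }
      cur = nxt;
    }
    return removed;
  }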
@@ -1485,19 +1480,19 @@
       }
       nScavenged += deflatedcount;
       nInuse += cur->omInUseCount;
-     }
+    }
 
-   // For moribund threads, scan gOmInUseList
-   if (gOmInUseList) {
-     nInCirculation += gOmInUseCount;
-     int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
-     gOmInUseCount-= deflatedcount;
-     nScavenged += deflatedcount;
-     nInuse += gOmInUseCount;
+    // For moribund threads, scan gOmInUseList
+    if (gOmInUseList) {
+      nInCirculation += gOmInUseCount;
+      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
+      gOmInUseCount -= deflatedcount;
+      nScavenged += deflatedcount;
+      nInuse += gOmInUseCount;
     }
 
   } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
-  // Iterate over all extant monitors - Scavenge all idle monitors.
+    // Iterate over all extant monitors - Scavenge all idle monitors.
     assert(block->object() == CHAINMARKER, "must be a block header");
     nInCirculation += _BLOCKSIZE;
     for (int i = 1; i < _BLOCKSIZE; i++) {
@@ -1529,8 +1524,8 @@
 
   if (ObjectMonitor::Knob_Verbose) {
     ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
-        nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
-        MonitorPopulation, MonitorFreeCount);
+             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
+             MonitorPopulation, MonitorFreeCount);
     ::fflush(stdout);
   }
 
@@ -1538,11 +1533,11 @@
 
   // Move the scavenged monitors back to the global free list.
   if (FreeHead != NULL) {
-     guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
-     assert(FreeTail->FreeNext == NULL, "invariant");
-     // constant-time list splice - prepend scavenged segment to gFreeList
-     FreeTail->FreeNext = gFreeList;
-     gFreeList = FreeHead;
+    guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
+    assert(FreeTail->FreeNext == NULL, "invariant");
+    // constant-time list splice - prepend scavenged segment to gFreeList
+    FreeTail->FreeNext = gFreeList;
+    gFreeList = FreeHead;
   }
   Thread::muxRelease(&ListLock);
 
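The "constant-time list splice" above works because the scavenged segment already knows its own tail: prepending it to gFreeList is two pointer stores regardless of segment length. Sketch (names are illustrative; the real code holds ListLock across this):

  struct Monitor { Monitor* FreeNext = nullptr; };
  Monitor* gFreeListSketch = nullptr;

  void splice_to_global(Monitor* head, Monitor* tail) {
    // Mirrors the guarantee above: tail must terminate the segment,
    // i.e. tail->FreeNext == nullptr on entry.
    tail->FreeNext = gFreeListSketch;   // old list hangs off the segment tail
    gFreeListSketch = head;             // segment head becomes the list head
  }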
@@ -1561,10 +1556,10 @@
 // Gives up on a particular monitor if an exception occurs, but continues
 // the overall iteration, swallowing the exception.
 class ReleaseJavaMonitorsClosure: public MonitorClosure {
-private:
+ private:
   TRAPS;
 
-public:
+ public:
   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
   void do_monitor(ObjectMonitor* mid) {
     if (mid->owner() == THREAD) {
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -52,25 +52,26 @@
   // assembly copies of these routines. Please keep them synchronized.
   //
   // attempt_rebias flag is used by UseBiasedLocking implementation
-  static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
-  static void fast_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias,
+                         TRAPS);
+  static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD);
 
   // WARNING: They are ONLY used to handle the slow cases. They should
   // only be used when the fast cases failed. Use of these functions
   // without previous fast case check may cause fatal error.
-  static void slow_enter  (Handle obj, BasicLock* lock, TRAPS);
-  static void slow_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
+  static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD);
 
   // Used only to handle jni locks or other unmatched monitor enter/exit
   // Internally they will use heavy weight monitor.
-  static void jni_enter   (Handle obj, TRAPS);
+  static void jni_enter(Handle obj, TRAPS);
   static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
-  static void jni_exit    (oop obj,    Thread* THREAD);
+  static void jni_exit(oop obj, Thread* THREAD);
 
   // Handle all interpreter, compiler and jni cases
-  static int  wait               (Handle obj, jlong millis, TRAPS);
-  static void notify             (Handle obj,               TRAPS);
-  static void notifyall          (Handle obj,               TRAPS);
+  static int  wait(Handle obj, jlong millis, TRAPS);
+  static void notify(Handle obj, TRAPS);
+  static void notifyall(Handle obj, TRAPS);
 
   // Special internal-use-only method for use by JVM infrastructure
   // that needs to wait() on a java-level object but that can't risk
@@ -80,13 +81,14 @@
   // used by classloading to free classloader object lock,
   // wait on an internal lock, and reclaim original lock
   // with original recursion count
-  static intptr_t complete_exit  (Handle obj,                TRAPS);
-  static void reenter            (Handle obj, intptr_t recursion, TRAPS);
+  static intptr_t complete_exit(Handle obj, TRAPS);
+  static void reenter(Handle obj, intptr_t recursion, TRAPS);
 
   // thread-specific and global objectMonitor free list accessors
   static void verifyInUse(Thread * Self);
   static ObjectMonitor * omAlloc(Thread * Self);
-  static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc);
+  static void omRelease(Thread * Self, ObjectMonitor * m,
+                        bool FromPerThreadAlloc);
   static void omFlush(Thread * Self);
 
   // Inflate light weight monitor to heavy weight monitor
@@ -116,7 +118,8 @@
   static int walk_monitor_list(ObjectMonitor** listheadp,
                                ObjectMonitor** freeHeadp,
                                ObjectMonitor** freeTailp);
-  static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** freeHeadp,
+  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
+                              ObjectMonitor** freeHeadp,
                               ObjectMonitor** freeTailp);
   static void oops_do(OopClosure* f);
 
@@ -159,13 +162,13 @@
   ~ObjectLocker();
 
   // Monitor behavior
-  void wait      (TRAPS)      { ObjectSynchronizer::wait     (_obj, 0, CHECK); } // wait forever
-  void notify_all(TRAPS)      { ObjectSynchronizer::notifyall(_obj,    CHECK); }
-  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK); }
+  void wait(TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
+  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
+  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
   // complete_exit gives up lock completely, returning recursion count
   // reenter reclaims lock with original recursion count
-  intptr_t complete_exit(TRAPS) { return  ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
-  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
+  intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
+  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
 };
 
 #endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
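A usage sketch for the scoped ObjectLocker above. Hedged: the constructor (commonly ObjectLocker(Handle, Thread*, bool doLock)) is declared outside this hunk, and wait_on is an illustrative helper, not an in-tree function:

  // Enter obj's monitor for the current scope, wait on it, and let the
  // destructor release the lock even if wait() throws.
  void wait_on(Handle obj, TRAPS) {
    ObjectLocker ol(obj, THREAD);   // assumed ctor; enters the monitor
    ol.wait(CHECK);                 // ObjectSynchronizer::wait(_obj, 0, CHECK)
  }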
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Sep 10 17:06:36 2014 -0700
@@ -109,25 +109,25 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_START
-#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP
-
-#define DTRACE_THREAD_PROBE(probe, javathread)                             \
-  {                                                                        \
-    ResourceMark rm(this);                                                 \
-    int len = 0;                                                           \
-    const char* name = (javathread)->get_thread_name();                    \
-    len = strlen(name);                                                    \
-    HOTSPOT_THREAD_PROBE_##probe(  /* probe = start, stop */               \
-      (char *) name, len,                                                           \
-      java_lang_Thread::thread_id((javathread)->threadObj()),              \
-      (uintptr_t) (javathread)->osthread()->thread_id(),                               \
-      java_lang_Thread::is_daemon((javathread)->threadObj()));             \
-  }
+  #define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_START
+  #define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP
+
+  #define DTRACE_THREAD_PROBE(probe, javathread)                           \
+    {                                                                      \
+      ResourceMark rm(this);                                               \
+      int len = 0;                                                         \
+      const char* name = (javathread)->get_thread_name();                  \
+      len = strlen(name);                                                  \
+      HOTSPOT_THREAD_PROBE_##probe(/* probe = start, stop */               \
+        (char *) name, len,                                                \
+        java_lang_Thread::thread_id((javathread)->threadObj()),            \
+        (uintptr_t) (javathread)->osthread()->thread_id(),                 \
+        java_lang_Thread::is_daemon((javathread)->threadObj()));           \
+    }
 
 #else //  ndef DTRACE_ENABLED
 
-#define DTRACE_THREAD_PROBE(probe, javathread)
+  #define DTRACE_THREAD_PROBE(probe, javathread)
 
 #endif // ndef DTRACE_ENABLED
 
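A hedged usage note for the macro above: the probe suffix is token-pasted onto HOTSPOT_THREAD_PROBE_, so call sites elsewhere in thread.cpp pass the bare name:

  // DTRACE_THREAD_PROBE(start, this);  // expands to HOTSPOT_THREAD_START(...)
  // DTRACE_THREAD_PROBE(stop, this);   // expands to HOTSPOT_THREAD_STOP(...)
  // With DTRACE_ENABLED undefined, both calls compile away to nothing.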
@@ -148,15 +148,16 @@
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
-                                              AllocFailStrategy::RETURN_NULL);
+                                                         AllocFailStrategy::RETURN_NULL);
     void* aligned_addr     = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
     assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
            ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
            "JavaThread alignment code overflowed allocated storage");
     if (TraceBiasedLocking) {
-      if (aligned_addr != real_malloc_addr)
+      if (aligned_addr != real_malloc_addr) {
         tty->print_cr("Aligned thread " INTPTR_FORMAT " to " INTPTR_FORMAT,
                       real_malloc_addr, aligned_addr);
+      }
     }
     ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
     return aligned_addr;
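The allocator above over-allocates by (alignment - sizeof(intptr_t)) and rounds the pointer up; for a power-of-two alignment A the rounding is (p + A - 1) & ~(A - 1), which is what align_size_up computes. A standalone sketch, with malloc standing in for AllocateHeap (the real code stashes _real_malloc_address so the block can be freed later):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <cstdlib>

  void* aligned_alloc_sketch(size_t size, size_t alignment /* power of two */) {
    size_t padded = size + (alignment - sizeof(intptr_t));
    void* raw = std::malloc(padded);       // already intptr_t-aligned or better
    if (raw == nullptr) return nullptr;
    uintptr_t p  = reinterpret_cast<uintptr_t>(raw);
    uintptr_t up = (p + alignment - 1) & ~(uintptr_t)(alignment - 1);
    assert(up + size <= p + padded);       // the same overflow check as above
    return reinterpret_cast<void*>(up);    // caller must keep raw to free it
  }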
@@ -264,7 +265,7 @@
            this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
-#endif /* ASSERT */
+#endif // ASSERT
 }
 
 void Thread::initialize_thread_local_storage() {
@@ -365,7 +366,7 @@
 #ifdef ASSERT
 // Private method to check for dangling thread pointer
 void check_for_dangling_thread_pointer(Thread *thread) {
- assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
+  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
          "possibility of dangling Thread pointer");
 }
 #endif
@@ -445,7 +446,6 @@
 }
 
 
-//
 // Check if an external suspend request has completed (or has been
 // cancelled). Returns true if the thread is externally suspended and
 // false otherwise.
@@ -470,7 +470,6 @@
 // 0x00080000 - suspend request cancelled in loop (return false)
 // 0x00100000 - thread suspended in loop (return true)
 // 0x00200000 - suspend not completed during retry loop (return false)
-//
 
 // Helper class for tracing suspend wait debug bits.
 //
@@ -517,8 +516,8 @@
         ResourceMark rm;
 
         tty->print_cr(
-            "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
-            jt->get_thread_name(), *bits);
+                      "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
+                      jt->get_thread_name(), *bits);
 
         guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
       }
@@ -528,7 +527,8 @@
 #undef DEBUG_FALSE_BITS
 
 
-bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits) {
+bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay,
+                                          uint32_t *bits) {
   TraceSuspendDebugBits tsdb(this, false /* !is_wait */, called_by_wait, bits);
 
   bool did_trans_retry = false;  // only do thread_in_native_trans retry once
@@ -649,12 +649,11 @@
   return false;
 }
 
-//
 // Wait for an external suspend request to complete (or be cancelled).
 // Returns true if the thread is externally suspended and false otherwise.
 //
 bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
-       uint32_t *bits) {
+                                                 uint32_t *bits) {
   TraceSuspendDebugBits tsdb(this, true /* is_wait */,
                              false /* !called_by_wait */, bits);
 
@@ -737,20 +736,21 @@
 }
 
 #ifndef PRODUCT
-void JavaThread::record_jump(address target, address instr, const char* file, int line) {
+void JavaThread::record_jump(address target, address instr, const char* file,
+                             int line) {
 
   // This should not need to be atomic as the only way for simultaneous
   // updates is via interrupts. Even then this should be rare or non-existent
   // and we don't care that much anyway.
 
   int index = _jmp_ring_index;
-  _jmp_ring_index = (index + 1 ) & (jump_ring_buffer_size - 1);
+  _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1);
   _jmp_ring[index]._target = (intptr_t) target;
   _jmp_ring[index]._instruction = (intptr_t) instr;
   _jmp_ring[index]._file = file;
   _jmp_ring[index]._line = line;
 }
-#endif /* PRODUCT */
+#endif // PRODUCT
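A side note on the index update above: jump_ring_buffer_size is a power of two, so masking with (size - 1) wraps the ring index without a modulo. Tiny illustration (kRingSize is an illustrative constant, not the HotSpot name):

  constexpr int kRingSize = 8;              // must be a power of two
  inline int next_slot(int index) {
    return (index + 1) & (kRingSize - 1);   // 7 -> 0, 3 -> 4, ...
  }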
 
 // Called by flat profiler
 // Callers have already called wait_for_ext_suspend_completion
@@ -759,8 +759,8 @@
   bool gotframe = false;
   // self suspension saves needed state.
   if (has_last_Java_frame() && _anchor.walkable()) {
-     *_fr = pd_last_frame();
-     gotframe = true;
+    *_fr = pd_last_frame();
+    gotframe = true;
   }
   return gotframe;
 }
@@ -790,7 +790,7 @@
     } else {
       guarantee(res == strong_roots_parity, "Or else what?");
       assert(SharedHeap::heap()->workers()->active_workers() > 0,
-         "Should only fail when parallel.");
+             "Should only fail when parallel.");
       return false;
     }
   }
@@ -834,13 +834,13 @@
 // Thread::print_on_error() is called by fatal error handler. Don't use
 // any lock or allocate memory.
 void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
-  if (is_VM_thread())                  st->print("VMThread");
-  else if (is_Compiler_thread())            st->print("CompilerThread");
-  else if (is_Java_thread())                st->print("JavaThread");
-  else if (is_GC_task_thread())             st->print("GCTaskThread");
-  else if (is_Watcher_thread())             st->print("WatcherThread");
-  else if (is_ConcurrentGC_thread())        st->print("ConcurrentGCThread");
-  else st->print("Thread");
+  if (is_VM_thread())                 st->print("VMThread");
+  else if (is_Compiler_thread())      st->print("CompilerThread");
+  else if (is_Java_thread())          st->print("JavaThread");
+  else if (is_GC_task_thread())       st->print("GCTaskThread");
+  else if (is_Watcher_thread())       st->print("WatcherThread");
+  else if (is_ConcurrentGC_thread())  st->print("ConcurrentGCThread");
+  else                                st->print("Thread");
 
   st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
             _stack_base - _stack_size, _stack_base);
@@ -882,38 +882,39 @@
 // invoke the vm-thread (i.e., an oop allocation). In that case, we also have to make sure that
 // no threads which allow_vm_block's are held
 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
-    // Check if current thread is allowed to block at a safepoint
-    if (!(_allow_safepoint_count == 0))
-      fatal("Possible safepoint reached by thread that does not allow it");
-    if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
-      fatal("LEAF method calling lock?");
-    }
+  // Check if current thread is allowed to block at a safepoint
+  if (!(_allow_safepoint_count == 0)) {
+    fatal("Possible safepoint reached by thread that does not allow it");
+  }
+  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+    fatal("LEAF method calling lock?");
+  }
 
 #ifdef ASSERT
-    if (potential_vm_operation && is_Java_thread()
-        && !Universe::is_bootstrapping()) {
-      // Make sure we do not hold any locks that the VM thread also uses.
-      // This could potentially lead to deadlocks
-      for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
-        // Threads_lock is special, since the safepoint synchronization will not start before this is
-        // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
-        // since it is used to transfer control between JavaThreads and the VMThread
-        // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
-        if ((cur->allow_vm_block() &&
-              cur != Threads_lock &&
-              cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
-              cur != VMOperationRequest_lock &&
-              cur != VMOperationQueue_lock) ||
-              cur->rank() == Mutex::special) {
-          fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
-        }
+  if (potential_vm_operation && is_Java_thread()
+      && !Universe::is_bootstrapping()) {
+    // Make sure we do not hold any locks that the VM thread also uses.
+    // This could potentially lead to deadlocks
+    for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+      // Threads_lock is special, since the safepoint synchronization will not start before this is
+      // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
+      // since it is used to transfer control between JavaThreads and the VMThread
+      // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
+      if ((cur->allow_vm_block() &&
+           cur != Threads_lock &&
+           cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
+           cur != VMOperationRequest_lock &&
+           cur != VMOperationQueue_lock) ||
+           cur->rank() == Mutex::special) {
+        fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
       }
     }
-
-    if (GCALotAtAllSafepoints) {
-      // We could enter a safepoint here and thus have a gc
-      InterfaceSupport::check_gc_alot();
-    }
+  }
+
+  if (GCALotAtAllSafepoints) {
+    // We could enter a safepoint here and thus have a gc
+    InterfaceSupport::check_gc_alot();
+  }
 #endif
 }
 #endif
@@ -947,7 +948,7 @@
 }
 
 bool Thread::set_as_starting_thread() {
- // NOTE: this must be called inside the main thread.
+  // NOTE: this must be called inside the main thread.
   return os::create_main_thread((JavaThread*)this);
 }
 
@@ -991,7 +992,8 @@
 }
 
 // Creates the initial Thread
-static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {
+static oop create_initial_thread(Handle thread_group, JavaThread* thread,
+                                 TRAPS) {
   Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL);
   instanceKlassHandle klass (THREAD, k);
   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
@@ -1004,12 +1006,12 @@
 
   JavaValue result(T_VOID);
   JavaCalls::call_special(&result, thread_oop,
-                                   klass,
-                                   vmSymbols::object_initializer_name(),
-                                   vmSymbols::threadgroup_string_void_signature(),
-                                   thread_group,
-                                   string,
-                                   CHECK_NULL);
+                          klass,
+                          vmSymbols::object_initializer_name(),
+                          vmSymbols::threadgroup_string_void_signature(),
+                          thread_group,
+                          string,
+                          CHECK_NULL);
   return thread_oop();
 }
 
@@ -1019,7 +1021,7 @@
 
   JavaValue result(T_VOID);
   JavaCalls::call_static(&result, klass, vmSymbols::initializeSystemClass_name(),
-                                         vmSymbols::void_method_signature(), CHECK);
+                         vmSymbols::void_method_signature(), CHECK);
 }
 
 char java_runtime_name[128] = "";
@@ -1028,15 +1030,16 @@
 // extract the JRE name from sun.misc.Version.java_runtime_name
 static const char* get_java_runtime_name(TRAPS) {
   Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
-                                      Handle(), Handle(), CHECK_AND_CLEAR_NULL);
+                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
   fieldDescriptor fd;
   bool found = k != NULL &&
                InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(),
                                                         vmSymbols::string_signature(), &fd);
   if (found) {
     oop name_oop = k->java_mirror()->obj_field(fd.offset());
-    if (name_oop == NULL)
+    if (name_oop == NULL) {
       return NULL;
+    }
     const char* name = java_lang_String::as_utf8_string(name_oop,
                                                         java_runtime_name,
                                                         sizeof(java_runtime_name));
@@ -1049,15 +1052,16 @@
 // extract the JRE version from sun.misc.Version.java_runtime_version
 static const char* get_java_runtime_version(TRAPS) {
   Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
-                                      Handle(), Handle(), CHECK_AND_CLEAR_NULL);
+                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
   fieldDescriptor fd;
   bool found = k != NULL &&
                InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(),
                                                         vmSymbols::string_signature(), &fd);
   if (found) {
     oop name_oop = k->java_mirror()->obj_field(fd.offset());
-    if (name_oop == NULL)
+    if (name_oop == NULL) {
       return NULL;
+    }
     const char* name = java_lang_String::as_utf8_string(name_oop,
                                                         java_runtime_version,
                                                         sizeof(java_runtime_version));
@@ -1075,8 +1079,8 @@
   if (klass.not_null()) {
     JavaValue result(T_VOID);
     JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
-                                           vmSymbols::void_method_signature(),
-                                           CHECK);
+                           vmSymbols::void_method_signature(),
+                           CHECK);
   }
 }
 
@@ -1107,7 +1111,8 @@
 }
 
 
-void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS) {
+void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name,
+                                    bool daemon, TRAPS) {
   assert(thread_group.not_null(), "thread group should be specified");
   assert(threadObj() == NULL, "should only create Java thread object once");
 
@@ -1146,7 +1151,7 @@
 
 
   if (daemon) {
-      java_lang_Thread::set_daemon(thread_oop());
+    java_lang_Thread::set_daemon(thread_oop());
   }
 
   if (HAS_PENDING_EXCEPTION) {
@@ -1157,12 +1162,12 @@
   Handle threadObj(this, this->threadObj());
 
   JavaCalls::call_special(&result,
-                         thread_group,
-                         group,
-                         vmSymbols::add_method_name(),
-                         vmSymbols::thread_void_signature(),
-                         threadObj,          // Arg 1
-                         THREAD);
+                          thread_group,
+                          group,
+                          vmSymbols::add_method_name(),
+                          vmSymbols::thread_void_signature(),
+                          threadObj,          // Arg 1
+                          THREAD);
 
 
 }
@@ -1246,30 +1251,31 @@
     jlong now = os::javaTimeNanos();
 
     if (remaining == 0) {
-        // if we didn't have any tasks we could have waited for a long time
-        // consider the time_slept zero and reset time_before_loop
-        time_slept = 0;
-        time_before_loop = now;
+      // if we didn't have any tasks we could have waited for a long time
+      // consider the time_slept zero and reset time_before_loop
+      time_slept = 0;
+      time_before_loop = now;
     } else {
-        // need to recalculate since we might have new tasks in _tasks
-        time_slept = (int) ((now - time_before_loop) / 1000000);
+      // need to recalculate since we might have new tasks in _tasks
+      time_slept = (int) ((now - time_before_loop) / 1000000);
     }
 
     // Change to task list or spurious wakeup of some kind
     if (timedout || _should_terminate) {
-        break;
+      break;
     }
 
     remaining = PeriodicTask::time_to_wait();
     if (remaining == 0) {
-        // Last task was just disenrolled so loop around and wait until
-        // another task gets enrolled
-        continue;
+      // Last task was just disenrolled so loop around and wait until
+      // another task gets enrolled
+      continue;
     }
 
     remaining -= time_slept;
-    if (remaining <= 0)
+    if (remaining <= 0) {
       break;
+    }
   }
 
   return time_slept;
@@ -1302,13 +1308,13 @@
 
       for (;;) {
         if (!ShowMessageBoxOnError
-         && (OnError == NULL || OnError[0] == '\0')
-         && Arguments::abort_hook() == NULL) {
-             os::sleep(this, 2 * 60 * 1000, false);
-             fdStream err(defaultStream::output_fd());
-             err.print_raw_cr("# [ timer expired, abort... ]");
-             // skip atexit/vm_exit/vm_abort hooks
-             os::die();
+            && (OnError == NULL || OnError[0] == '\0')
+            && Arguments::abort_hook() == NULL) {
+          os::sleep(this, 2 * 60 * 1000, false);
+          fdStream err(defaultStream::output_fd());
+          err.print_raw_cr("# [ timer expired, abort... ]");
+          // skip atexit/vm_exit/vm_abort hooks
+          os::die();
         }
 
         // Wake up 5 seconds later, the fatal handler may reset OnError or
@@ -1387,7 +1393,9 @@
 }
 
 void WatcherThread::unpark() {
-  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self()
+                   ? NULL
+                   : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
   PeriodicTask_lock->notify();
 }
 
@@ -1455,7 +1463,7 @@
   for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
     record_jump(NULL, NULL, NULL, 0);
   }
-#endif /* PRODUCT */
+#endif // PRODUCT
 
   set_thread_profiler(NULL);
   if (FlatProfiler::is_active()) {
@@ -1486,10 +1494,10 @@
 #endif // INCLUDE_ALL_GCS
 
 JavaThread::JavaThread(bool is_attaching_via_jni) :
-  Thread()
+                       Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+                       , _satb_mark_queue(&_satb_mark_queue_set),
+                       _dirty_card_queue(&_dirty_card_queue_set)
 #endif // INCLUDE_ALL_GCS
 {
   initialize();
@@ -1543,10 +1551,10 @@
 static void compiler_thread_entry(JavaThread* thread, TRAPS);
 
 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
-  Thread()
+                       Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+                       , _satb_mark_queue(&_satb_mark_queue_set),
+                       _dirty_card_queue(&_dirty_card_queue_set)
 #endif // INCLUDE_ALL_GCS
 {
   if (TraceThreadEvents) {
@@ -1575,7 +1583,7 @@
 
 JavaThread::~JavaThread() {
   if (TraceThreadEvents) {
-      tty->print_cr("terminate thread %p", this);
+    tty->print_cr("terminate thread %p", this);
   }
 
   // JSR166 -- return the parker to the free list
@@ -1649,8 +1657,8 @@
 
   EventThreadStart event;
   if (event.should_commit()) {
-     event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
-     event.commit();
+    event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+    event.commit();
   }
 
   // We call another function to do the rest so we are sure that the stack addresses used
@@ -1742,10 +1750,10 @@
       if (HAS_PENDING_EXCEPTION) {
         ResourceMark rm(this);
         jio_fprintf(defaultStream::error_stream(),
-              "\nException: %s thrown from the UncaughtExceptionHandler"
-              " in thread \"%s\"\n",
-              pending_exception()->klass()->external_name(),
-              get_thread_name());
+                    "\nException: %s thrown from the UncaughtExceptionHandler"
+                    " in thread \"%s\"\n",
+                    pending_exception()->klass()->external_name(),
+                    get_thread_name());
         CLEAR_PENDING_EXCEPTION;
       }
     }
@@ -1754,8 +1762,8 @@
     // from java_lang_Thread object
     EventThreadEnd event;
     if (event.should_commit()) {
-        event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
-        event.commit();
+      event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+      event.commit();
     }
 
     // Call after last event on thread
@@ -1771,10 +1779,10 @@
         JavaValue result(T_VOID);
         KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
         JavaCalls::call_virtual(&result,
-                              threadObj, thread_klass,
-                              vmSymbols::exit_method_name(),
-                              vmSymbols::void_method_signature(),
-                              THREAD);
+                                threadObj, thread_klass,
+                                vmSymbols::exit_method_name(),
+                                vmSymbols::void_method_signature(),
+                                THREAD);
         CLEAR_PENDING_EXCEPTION;
       }
     }
@@ -2061,23 +2069,20 @@
       condition == _async_unsafe_access_error && !has_pending_exception()) {
     condition = _no_async_condition;  // done
     switch (thread_state()) {
-    case _thread_in_vm:
-      {
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
-      }
-    case _thread_in_native:
-      {
-        ThreadInVMfromNative tiv(this);
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
-      }
-    case _thread_in_Java:
-      {
-        ThreadInVMfromJava tiv(this);
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
-      }
+    case _thread_in_vm: {
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+    }
+    case _thread_in_native: {
+      ThreadInVMfromNative tiv(this);
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+    }
+    case _thread_in_Java: {
+      ThreadInVMfromJava tiv(this);
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
+    }
     default:
       ShouldNotReachHere();
     }
@@ -2170,8 +2175,8 @@
       set_pending_async_exception(java_throwable);
 
       if (TraceExceptions) {
-       ResourceMark rm;
-       tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
+        ResourceMark rm;
+        tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
@@ -2198,7 +2203,7 @@
 void JavaThread::java_suspend() {
   { MutexLocker mu(Threads_lock);
     if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
-       return;
+      return;
     }
   }
 
@@ -2214,7 +2219,7 @@
     // SR_lock to allow the thread to reach a stable thread state if
     // it is currently in a transient thread state.
     if (is_ext_suspend_completed(false /* !called_by_wait */,
-                                 SuspendRetryDelay, &debug_bits) ) {
+                                 SuspendRetryDelay, &debug_bits)) {
       return;
     }
   }
@@ -2241,18 +2246,18 @@
 
   // we are in the process of exiting so don't suspend
   if (is_exiting()) {
-     clear_external_suspend();
-     return ret;
+    clear_external_suspend();
+    return ret;
   }
 
   assert(_anchor.walkable() ||
-    (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
-    "must have walkable stack");
+         (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
+         "must have walkable stack");
 
   MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
 
   assert(!this->is_ext_suspended(),
-    "a thread trying to self-suspend should not already be suspended");
+         "a thread trying to self-suspend should not already be suspended");
 
   if (this->is_suspend_equivalent()) {
     // If we are self-suspending as a result of the lifting of a
@@ -2289,12 +2294,11 @@
 // hence doesn't need protection from concurrent access at this stage
 void JavaThread::verify_not_published() {
   if (!Threads_lock->owned_by_self()) {
-   MutexLockerEx ml(Threads_lock,  Mutex::_no_safepoint_check_flag);
-   assert(!Threads::includes(this),
+    MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+    assert(!Threads::includes(this),
            "java thread shouldn't have been published yet!");
-  }
-  else {
-   assert(!Threads::includes(this),
+  } else {
+    assert(!Threads::includes(this),
            "java thread shouldn't have been published yet!");
   }
 }
@@ -2474,7 +2478,7 @@
     if (os::unguard_memory((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
-        warning("Attempt to unprotect stack guard pages failed.");
+      warning("Attempt to unprotect stack guard pages failed.");
     }
   }
 }
@@ -2570,7 +2574,7 @@
         // search for the current bci in that string.
         address pc = fst.current()->pc();
         nmethod* nm =  (nmethod*) fst.current()->cb();
-        ScopeDesc* sd = nm->scope_desc_at( pc);
+        ScopeDesc* sd = nm->scope_desc_at(pc);
         char buffer[8];
         jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci());
         size_t len = strlen(buffer);
@@ -2640,7 +2644,7 @@
 // the given JavaThread in its _processed_thread field.
 class RememberProcessedThread: public StackObj {
   NamedThread* _cur_thr;
-public:
+ public:
   RememberProcessedThread(JavaThread* jthr) {
     Thread* thread = Thread::current();
     if (thread->is_Named_thread()) {
@@ -2669,7 +2673,7 @@
   Thread::oops_do(f, cld_f, cf);
 
   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
-          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
     // Record JavaThread to GC thread
@@ -2729,7 +2733,7 @@
   Thread::nmethods_do(cf);  // (super method is a no-op)
 
   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
-          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
     // Traverse the execution stack
@@ -2779,7 +2783,7 @@
 };
 void JavaThread::print_thread_state() const {
   print_thread_state_on(tty);
-};
+}
 #endif // PRODUCT
 
 // Called by Threads::print() for VM_PrintThreads operation
@@ -2809,7 +2813,7 @@
   st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
   oop thread_obj = threadObj();
   if (thread_obj != NULL) {
-     if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
+    if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
   }
   st->print(" [");
   st->print("%s", _get_thread_state_name(_thread_state));
@@ -2853,7 +2857,7 @@
     }
   }
 #endif // ASSERT
-    return get_thread_name_string();
+  return get_thread_name_string();
 }
 
 // Returns a non-NULL representation of this thread's name, or a suitable
@@ -2865,20 +2869,18 @@
     typeArrayOop name = java_lang_Thread::name(thread_obj);
     if (name != NULL) {
       if (buf == NULL) {
-        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
-      }
-      else {
-        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length(), buf, buflen);
+        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR),
+                                    name->length());
+      } else {
+        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR),
+                                    name->length(), buf, buflen);
       }
-    }
-    else if (is_attaching_via_jni()) { // workaround for 6412693 - see 6404306
+    } else if (is_attaching_via_jni()) { // workaround for 6412693 - see 6404306
       name_str = "<no-name - thread is attaching>";
-    }
-    else {
+    } else {
       name_str = Thread::name();
     }
-  }
-  else {
+  } else {
     name_str = Thread::name();
   }
   assert(name_str != NULL, "unexpected NULL thread name");
@@ -2950,7 +2952,7 @@
   Handle thread_oop(Thread::current(),
                     JNIHandles::resolve_non_null(jni_thread));
   assert(InstanceKlass::cast(thread_oop->klass())->is_linked(),
-    "must be initialized");
+         "must be initialized");
   set_threadObj(thread_oop());
   java_lang_Thread::set_thread(thread_oop(), this);
 
@@ -3165,8 +3167,9 @@
 }
 
 // Create a CompilerThread
-CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
-: JavaThread(&compiler_thread_entry) {
+CompilerThread::CompilerThread(CompileQueue* queue,
+                               CompilerCounters* counters)
+                               : JavaThread(&compiler_thread_entry) {
   _env   = NULL;
   _log   = NULL;
   _task  = NULL;
@@ -3231,8 +3234,9 @@
   // way to prevent termination of WatcherThread would be to acquire
   // Terminator_lock, but we can't do that without violating the lock rank
   // checking in some cases.
-  if (wt != NULL)
+  if (wt != NULL) {
     tc->do_thread(wt);
+  }
 
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
@@ -3290,7 +3294,6 @@
 }
 
 jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
-
   extern void JDK_Version_init();
 
   // Check version
@@ -3383,7 +3386,7 @@
 
   if (!main_thread->set_as_starting_thread()) {
     vm_shutdown_during_initialization(
-      "Failed necessary internal allocation. Out of swap space");
+                                      "Failed necessary internal allocation. Out of swap space");
     delete main_thread;
     *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
     return JNI_ENOMEM;
@@ -3422,8 +3425,10 @@
     VMThread::create();
     Thread* vmthread = VMThread::vm_thread();
 
-    if (!os::create_thread(vmthread, os::vm_thread))
-      vm_exit_during_initialization("Cannot create VM thread. Out of system resources.");
+    if (!os::create_thread(vmthread, os::vm_thread)) {
+      vm_exit_during_initialization("Cannot create VM thread. "
+                                    "Out of system resources.");
+    }
 
     // Wait for the VM thread to become ready, and VMThread::run to initialize
     // Monitors can have spurious returns, must always check another state flag
@@ -3583,17 +3588,17 @@
   }
 
   {
-      MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
-      // Make sure the watcher thread can be started by WatcherThread::start()
-      // or by dynamic enrollment.
-      WatcherThread::make_startable();
-      // Start up the WatcherThread if there are any periodic tasks
-      // NOTE:  All PeriodicTasks should be registered by now. If they
-      //   aren't, late joiners might appear to start slowly (we might
-      //   take a while to process their first tick).
-      if (PeriodicTask::num_tasks() > 0) {
-          WatcherThread::start();
-      }
+    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+    // Make sure the watcher thread can be started by WatcherThread::start()
+    // or by dynamic enrollment.
+    WatcherThread::make_startable();
+    // Start up the WatcherThread if there are any periodic tasks
+    // NOTE:  All PeriodicTasks should be registered by now. If they
+    //   aren't, late joiners might appear to start slowly (we might
+    //   take a while to process their first tick).
+    if (PeriodicTask::num_tasks() > 0) {
+      WatcherThread::start();
+    }
   }
 
   // Give os specific code one last chance to start
@@ -3613,7 +3618,9 @@
 // Find a command line agent library and return its entry point for
 //         -agentlib:  -agentpath:   -Xrun
 // num_symbol_entries must be passed-in since only the caller knows the number of symbols in the array.
-static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
+static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
+                                    const char *on_load_symbols[],
+                                    size_t num_symbol_entries) {
   OnLoadEntry_t on_load_entry = NULL;
   void *library = NULL;
 
@@ -3749,10 +3756,10 @@
 
     // Find the Agent_OnUnload function.
     Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
-      os::find_agent_function(agent,
-      false,
-      on_unload_symbols,
-      num_symbol_entries));
+                                                   os::find_agent_function(agent,
+                                                   false,
+                                                   on_unload_symbols,
+                                                   num_symbol_entries));
 
     // Invoke the Agent_OnUnload function
     if (unload_entry != NULL) {
@@ -4006,8 +4013,9 @@
 
       // Only one thread left, do a notify on the Threads_lock so a thread waiting
       // on destroy_vm will wake up.
-      if (number_of_non_daemon_threads() == 1)
+      if (number_of_non_daemon_threads() == 1) {
         Threads_lock->notify_all();
+      }
     }
     ThreadService::remove_thread(p, daemon);
 
@@ -4060,7 +4068,7 @@
   bool is_par = sh->n_par_threads() > 0;
   assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
-          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+         SharedHeap::heap()->workers()->active_workers()), "Mismatch");
   int cp = SharedHeap::heap()->strong_roots_parity();
   ALL_JAVA_THREADS(p) {
     if (p->claim_oops_do(is_par, cp)) {
@@ -4113,9 +4121,10 @@
 
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
-  address monitor, bool doLock) {
+                                                         address monitor,
+                                                         bool doLock) {
   assert(doLock || SafepointSynchronize::is_at_safepoint(),
-    "must grab Threads_lock or be at safepoint");
+         "must grab Threads_lock or be at safepoint");
   GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
 
   int i = 0;
@@ -4135,7 +4144,8 @@
 }
 
 
-JavaThread *Threads::owning_thread_from_monitor_owner(address owner, bool doLock) {
+JavaThread *Threads::owning_thread_from_monitor_owner(address owner,
+                                                      bool doLock) {
   assert(doLock ||
          Threads_lock->owned_by_self() ||
          SafepointSynchronize::is_at_safepoint(),
@@ -4156,7 +4166,6 @@
   // like deadlock detection.
   if (UseHeavyMonitors) return NULL;
 
-  //
   // If we didn't find a matching Java thread and we didn't force use of
   // heavyweight monitors, then the owner is the stack address of the
   // Lock Word in the owning Java thread's stack.
@@ -4176,15 +4185,15 @@
 }
 
 // Threads::print_on() is called at safepoint by VM_PrintThreads operation.
-void Threads::print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks) {
+void Threads::print_on(outputStream* st, bool print_stacks,
+                       bool internal_format, bool print_concurrent_locks) {
   char buf[32];
   st->print_cr("%s", os::local_time_string(buf, sizeof(buf)));
 
   st->print_cr("Full thread dump %s (%s %s):",
-                Abstract_VM_Version::vm_name(),
-                Abstract_VM_Version::vm_release(),
-                Abstract_VM_Version::vm_info_string()
-               );
+               Abstract_VM_Version::vm_name(),
+               Abstract_VM_Version::vm_release(),
+               Abstract_VM_Version::vm_info_string());
   st->cr();
 
 #if INCLUDE_ALL_GCS
@@ -4229,7 +4238,8 @@
 // that VM is not at safepoint and/or current thread is inside signal handler.
 // Don't print stack trace, as the stack may not be walkable. Don't allocate
 // memory (even in resource area), it might deadlock the error handler.
-void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int buflen) {
+void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
+                             int buflen) {
   bool found_current = false;
   st->print_cr("Java Threads: ( => current thread )");
   ALL_JAVA_THREADS(thread) {
@@ -4301,9 +4311,9 @@
 
 typedef volatile int SpinLockT;
 
-void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
+void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
   if (Atomic::cmpxchg (1, adr, 0) == 0) {
-     return;   // normal fast-path return
+    return;   // normal fast-path return
   }
 
   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
@@ -4311,24 +4321,24 @@
   int ctr = 0;
   int Yields = 0;
   for (;;) {
-     while (*adr != 0) {
-        ++ctr;
-        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
-           if (Yields > 5) {
-             os::naked_short_sleep(1);
-           } else {
-             os::naked_yield();
-             ++Yields;
-           }
+    while (*adr != 0) {
+      ++ctr;
+      if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
+        if (Yields > 5) {
+          os::naked_short_sleep(1);
         } else {
-           SpinPause();
+          os::naked_yield();
+          ++Yields;
         }
-     }
-     if (Atomic::cmpxchg(1, adr, 0) == 0) return;
+      } else {
+        SpinPause();
+      }
+    }
+    if (Atomic::cmpxchg(1, adr, 0) == 0) return;
   }
 }
 
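A portable, hedged rendering of the Spin/Yield/Block strategy in SpinAcquire above, using std::atomic_flag in place of Atomic::cmpxchg and std::this_thread in place of os::naked_yield/os::naked_short_sleep. One simplification: the original spins on a plain load of *adr before retrying the CAS (and always yields on uniprocessors); this sketch folds that into repeated test_and_set.

  #include <atomic>
  #include <chrono>
  #include <thread>

  void spin_acquire(std::atomic_flag& lock) {
    if (!lock.test_and_set(std::memory_order_acquire)) return;  // fast path
    int ctr = 0, yields = 0;
    while (lock.test_and_set(std::memory_order_acquire)) {
      if ((++ctr & 0xFFF) != 0) continue;  // busy-spin 4095 of 4096 iterations
      if (yields > 5) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));  // block
      } else {
        std::this_thread::yield();         // give up the CPU politely
        ++yields;
      }
    }
  }

  void spin_release(std::atomic_flag& lock) {
    lock.clear(std::memory_order_release);  // release consistency, as above
  }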
-void Thread::SpinRelease (volatile int * adr) {
+void Thread::SpinRelease(volatile int * adr) {
   assert(*adr != 0, "invariant");
   OrderAccess::fence();      // guarantee at least release consistency.
   // Roach-motel semantics.
@@ -4397,53 +4407,53 @@
 typedef volatile intptr_t MutexT;      // Mux Lock-word
 enum MuxBits { LOCKBIT = 1 };
 
-void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
+void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
   intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
   if (w == 0) return;
   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-     return;
+    return;
   }
 
   TEVENT(muxAcquire - Contention);
   ParkEvent * const Self = Thread::current()->_MuxEvent;
   assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
   for (;;) {
-     int its = (os::is_MP() ? 100 : 0) + 1;
-
-     // Optional spin phase: spin-then-park strategy
-     while (--its >= 0) {
-       w = *Lock;
-       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+    int its = (os::is_MP() ? 100 : 0) + 1;
+
+    // Optional spin phase: spin-then-park strategy
+    while (--its >= 0) {
+      w = *Lock;
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+        return;
+      }
+    }
+
+    Self->reset();
+    Self->OnList = intptr_t(Lock);
+    // The following fence() isn't _strictly necessary as the subsequent
+    // CAS() both serializes execution and ratifies the fetched *Lock value.
+    OrderAccess::fence();
+    for (;;) {
+      w = *Lock;
+      if ((w & LOCKBIT) == 0) {
+        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+          Self->OnList = 0;   // hygiene - allows stronger asserts
           return;
-       }
-     }
-
-     Self->reset();
-     Self->OnList = intptr_t(Lock);
-     // The following fence() isn't _strictly necessary as the subsequent
-     // CAS() both serializes execution and ratifies the fetched *Lock value.
-     OrderAccess::fence();
-     for (;;) {
-        w = *Lock;
-        if ((w & LOCKBIT) == 0) {
-            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-                Self->OnList = 0;   // hygiene - allows stronger asserts
-                return;
-            }
-            continue;      // Interference -- *Lock changed -- Just retry
         }
-        assert(w & LOCKBIT, "invariant");
-        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-        if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
-     }
-
-     while (Self->OnList != 0) {
-        Self->park();
-     }
+        continue;      // Interference -- *Lock changed -- Just retry
+      }
+      assert(w & LOCKBIT, "invariant");
+      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
+      if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
+    }
+
+    while (Self->OnList != 0) {
+      Self->park();
+    }
   }
 }
 
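The mux lock above packs everything into one word: bit 0 is LOCKBIT and the remaining bits hold the head of a LIFO chain of parked ParkEvents. A hedged sketch of just that encoding; Waiter stands in for ParkEvent, and park/unpark plus the release-side pop are omitted:

  #include <atomic>
  #include <cstdint>

  struct Waiter { Waiter* ListNext = nullptr; };
  enum : intptr_t { kLockBit = 1 };

  // Uncontended path: CAS LOCKBIT on while preserving the waiter chain.
  bool mux_try_lock(std::atomic<intptr_t>& lock) {
    intptr_t w = lock.load();
    return (w & kLockBit) == 0 &&
           lock.compare_exchange_strong(w, w | kLockBit);
  }

  // Contended path: push self at the head of the chain, keeping LOCKBIT set;
  // the real code then parks until a releaser clears OnList and unparks it.
  bool mux_push_waiter(std::atomic<intptr_t>& lock, Waiter* self) {
    intptr_t w = lock.load();
    if ((w & kLockBit) == 0) return false;  // lock was freed - retry the CAS
    self->ListNext = reinterpret_cast<Waiter*>(w & ~kLockBit);
    return lock.compare_exchange_strong(
        w, reinterpret_cast<intptr_t>(self) | kLockBit);
  }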
-void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
+void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
   intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
   if (w == 0) return;
   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
@@ -4528,7 +4538,7 @@
 // bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
 // executed within the critical section are complete and globally visible before the
 // store (CAS) to the lock-word that releases the lock becomes globally visible.
-void Thread::muxRelease (volatile intptr_t * Lock)  {
+void Thread::muxRelease(volatile intptr_t * Lock)  {
   for (;;) {
     const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
     assert(w & LOCKBIT, "invariant");
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Sep 10 09:52:41 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Sep 10 17:06:36 2014 -0700
@@ -115,7 +115,7 @@
   void  operator delete(void* p);
 
  protected:
-   static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
+  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
  private:
 
   // ***************************************************************
@@ -178,7 +178,6 @@
   // 2. It would be more natural if set_external_suspend() is private and
   // part of java_suspend(), but that probably would affect the suspend/query
   // performance. Need more investigation on this.
-  //
 
   // suspend/resume lock: used for self-suspend
   Monitor* _SR_lock;
@@ -225,10 +224,10 @@
   // claimed as a task.
   jint _oops_do_parity;
 
-  public:
-   void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
-   HandleMark* last_handle_mark() const          { return _last_handle_mark; }
-  private:
+ public:
+  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
+  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
+ private:
 
   // debug support for checking if code does allow safepoints or not
   // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
@@ -445,9 +444,9 @@
   virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
-private:
+ private:
   bool claim_oops_do_par_case(int collection_parity);
-public:
+ public:
   // Requires that "collection_parity" is that of the current roots
   // iteration.  If "is_par" is false, sets the parity of "this" to
   // "collection_parity", and returns "true".  If "is_par" is true,
@@ -514,7 +513,7 @@
   void    record_stack_base_and_size();
 
   bool    on_local_stack(address adr) const {
-    /* QQQ this has knowledge of direction, ought to be a stack method */
+    // QQQ this has knowledge of direction, ought to be a stack method
     return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
   }
 
@@ -624,8 +623,8 @@
 
 inline Thread* Thread::current() {
 #ifdef ASSERT
-// This function is very high traffic. Define PARANOID to enable expensive
-// asserts.
+  // This function is very high traffic. Define PARANOID to enable expensive
+  // asserts.
 #ifdef PARANOID
   // Signal handler should call ThreadLocalStorage::get_thread_slow()
   Thread* t = ThreadLocalStorage::get_thread_slow();
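
As the reindented comment notes, Thread::current() is on a very hot path, so the expensive cross-check against the slow TLS lookup is compiled in only under PARANOID. A hedged sketch of that pattern; g_current_thread, slow_lookup(), and current_thread() are hypothetical names, and the trivial slow_lookup() exists only to keep the sketch self-contained:

  #include <cassert>

  struct Thread { };                     // opaque placeholder

  static thread_local Thread* g_current_thread = nullptr;  // fast TLS slot

  // Stand-in for ThreadLocalStorage::get_thread_slow(); a real VM would
  // consult an OS-specific registry here.
  static Thread* slow_lookup() { return g_current_thread; }

  inline Thread* current_thread() {
  #ifdef PARANOID
    // Expensive cross-check, enabled only in paranoid debug builds.
    Thread* t = slow_lookup();
    assert(t == g_current_thread && "fast TLS slot out of sync");
  #endif
    return g_current_thread;
  }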
@@ -664,9 +663,9 @@
 
 // Worker threads are named and have an id of an assigned work.
 class WorkerThread: public NamedThread {
-private:
+ private:
   uint _id;
-public:
+ public:
   WorkerThread() : _id(0)               { }
   virtual bool is_Worker_thread() const { return true; }
 
@@ -843,8 +842,8 @@
   jint                  _in_deopt_handler;       // count of deoptimization
                                                  // handlers thread is in
   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
-  bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
-                                                 // never locked) when throwing an exception. Used by interpreter only.
+  bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
+                                                         // never locked) when throwing an exception. Used by interpreter only.
 
   // JNI attach states:
   enum JNIAttachStates {
@@ -898,13 +897,13 @@
 #ifndef PRODUCT
   int _jmp_ring_index;
   struct {
-      // We use intptr_t instead of address so debugger doesn't try and display strings
-      intptr_t _target;
-      intptr_t _instruction;
-      const char*  _file;
-      int _line;
+    // We use intptr_t instead of address so debugger doesn't try and display strings
+    intptr_t _target;
+    intptr_t _instruction;
+    const char*  _file;
+    int _line;
   }   _jmp_ring[jump_ring_buffer_size];
-#endif /* PRODUCT */
+#endif // PRODUCT
 
 #if INCLUDE_ALL_GCS
   // Support for G1 barriers
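
The _jmp_ring struct reflowed above is a fixed-size ring of the last few recorded jumps, kept as intptr_t precisely so a debugger will not try to render the values as strings. A stand-alone sketch of the same structure (JumpRing, record(), and the buffer size are illustrative, not HotSpot's exact layout):

  #include <cstdint>

  const int jump_ring_buffer_size = 16;   // illustrative; HotSpot sizes this itself

  struct JumpRecord {
    intptr_t    target;        // intptr_t, not address, so debuggers don't
    intptr_t    instruction;   // try to display these as strings
    const char* file;
    int         line;
  };

  struct JumpRing {
    JumpRecord entries[jump_ring_buffer_size];
    int index = 0;

    void record(intptr_t target, intptr_t instr, const char* file, int line) {
      entries[index] = { target, instr, file, line };
      index = (index + 1) % jump_ring_buffer_size;   // oldest entry overwritten
    }
  };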
@@ -1071,7 +1070,7 @@
     // Warning: is_ext_suspend_completed() may temporarily drop the
     // SR_lock to allow the thread to reach a stable thread state if
     // it is currently in a transient thread state.
-    return is_ext_suspend_completed(false /*!called_by_wait */,
+    return is_ext_suspend_completed(false /* !called_by_wait */,
                                     SuspendRetryDelay, bits);
   }
 
@@ -1096,7 +1095,7 @@
   // Whenever a thread transitions from native to vm/java it must suspend
   // if external|deopt suspend is present.
   bool is_suspend_after_native() const {
-    return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
+    return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0;
   }
 
   // external suspend request is completed
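
The predicate just above folds two suspend conditions into one mask test: OR the flag bits together once, then AND against the flags word. A sketch with hypothetical bit values (HotSpot defines _external_suspend and _deopt_suspend in a Thread-side enum):

  #include <cstdint>

  // Hypothetical bit values for illustration only.
  const uint32_t EXTERNAL_SUSPEND = 0x01;
  const uint32_t DEOPT_SUSPEND    = 0x02;

  inline bool is_suspend_after_native(uint32_t suspend_flags) {
    // One AND against the pre-ORed mask tests both conditions at once.
    return (suspend_flags & (EXTERNAL_SUSPEND | DEOPT_SUSPEND)) != 0;
  }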
@@ -1113,7 +1112,7 @@
   // when a suspend equivalent condition lifts.
   bool handle_special_suspend_equivalent_condition() {
     assert(is_suspend_equivalent(),
-      "should only be called in a suspend equivalence condition");
+           "should only be called in a suspend equivalence condition");
     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
     bool ret = is_external_suspend();
     if (!ret) {
@@ -1137,8 +1136,8 @@
 
   bool is_suspend_equivalent() const             { return _suspend_equivalent; }
 
-  void set_suspend_equivalent()                  { _suspend_equivalent = true; };
-  void clear_suspend_equivalent()                { _suspend_equivalent = false; };
+  void set_suspend_equivalent()                  { _suspend_equivalent = true; }
+  void clear_suspend_equivalent()                { _suspend_equivalent = false; }
 
   // Thread.stop support
   void send_thread_stop(oop throwable);
@@ -1238,18 +1237,25 @@
 
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
-  address stack_yellow_zone_base()
-    { return (address)(stack_base() - (stack_size() - (stack_red_zone_size() + stack_yellow_zone_size()))); }
-  size_t  stack_yellow_zone_size()
-    { return StackYellowPages * os::vm_page_size(); }
-  address stack_red_zone_base()
-    { return (address)(stack_base() - (stack_size() - stack_red_zone_size())); }
-  size_t stack_red_zone_size()
-    { return StackRedPages * os::vm_page_size(); }
-  bool in_stack_yellow_zone(address a)
-    { return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); }
-  bool in_stack_red_zone(address a)
-    { return (a <= stack_red_zone_base()) && (a >= (address)((intptr_t)stack_base() - stack_size())); }
+  address stack_yellow_zone_base() {
+    return (address)(stack_base() -
+                     (stack_size() -
+                     (stack_red_zone_size() + stack_yellow_zone_size())));
+  }
+  size_t  stack_yellow_zone_size() {
+    return StackYellowPages * os::vm_page_size();
+  }
+  address stack_red_zone_base() {
+    return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
+  }
+  size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
+  bool in_stack_yellow_zone(address a) {
+    return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
+  }
+  bool in_stack_red_zone(address a) {
+    return (a <= stack_red_zone_base()) &&
+           (a >= (address)((intptr_t)stack_base() - stack_size()));
+  }
 
   void create_stack_guard_pages();
   void remove_stack_guard_pages();
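
The zone accessors reflowed above all derive from one layout: the stack grows down from stack_base(), and the guard zones sit at its low end, red below yellow. A worked sketch of the arithmetic with hypothetical page counts (red_zone_base, yellow_zone_base, and the constants are illustrative names):

  #include <cstddef>

  typedef unsigned char* address;

  // Hypothetical sizes: 4 KB pages, 1 red page, 2 yellow pages.
  const size_t PAGE   = 4096;
  const size_t RED    = 1 * PAGE;   // StackRedPages * os::vm_page_size()
  const size_t YELLOW = 2 * PAGE;   // StackYellowPages * os::vm_page_size()

  // The stack grows down from base; its lowest address is base - size.
  // Guard zones sit at the low end: [low | red | yellow | usable ... base].
  inline address red_zone_base(address base, size_t size) {
    return base - (size - RED);
  }
  inline address yellow_zone_base(address base, size_t size) {
    return base - (size - (RED + YELLOW));
  }
  inline bool in_red_zone(address a, address base, size_t size) {
    return a <= red_zone_base(base, size) && a >= base - size;
  }
  inline bool in_yellow_zone(address a, address base, size_t size) {
    return a <= yellow_zone_base(base, size) && a >= red_zone_base(base, size);
  }

With a 1 MB stack based at B, the red zone spans the 4 KB just above B - 1 MB and the yellow zone the 8 KB above that, which is exactly what the in_stack_*_zone predicates test.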
@@ -1289,14 +1295,14 @@
 
 #ifndef PRODUCT
   void record_jump(address target, address instr, const char* file, int line);
-#endif /* PRODUCT */
+#endif // PRODUCT
 
   // For assembly stub generation
   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
 #ifndef PRODUCT
   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
-#endif /* PRODUCT */
+#endif // PRODUCT
   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
   static ByteSize last_Java_sp_offset() {
     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
@@ -1339,25 +1345,28 @@
     // Only return NULL if thread is off the thread list; starting to
     // exit should not return NULL.
     if (thread_from_jni_env->is_terminated()) {
-       thread_from_jni_env->block_if_vm_exited();
-       return NULL;
+      thread_from_jni_env->block_if_vm_exited();
+      return NULL;
     } else {
-       return thread_from_jni_env;
+      return thread_from_jni_env;
     }
   }
 
   // JNI critical regions. These can nest.
   bool in_critical()    { return _jni_active_critical > 0; }
   bool in_last_critical()  { return _jni_active_critical == 1; }
-  void enter_critical() { assert(Thread::current() == this ||
-                                 Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
-                                 "this must be current thread or synchronizing");
-                          _jni_active_critical++; }
-  void exit_critical()  { assert(Thread::current() == this,
-                                 "this must be current thread");
-                          _jni_active_critical--;
-                          assert(_jni_active_critical >= 0,
-                                 "JNI critical nesting problem?"); }
+  void enter_critical() {
+    assert(Thread::current() == this ||
+           (Thread::current()->is_VM_thread() &&
+           SafepointSynchronize::is_synchronizing()),
+           "this must be current thread or synchronizing");
+    _jni_active_critical++;
+  }
+  void exit_critical() {
+    assert(Thread::current() == this, "this must be current thread");
+    _jni_active_critical--;
+    assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
+  }
 
   // Checked JNI, is the programmer required to check for exceptions, specify which function name
   bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
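
enter_critical()/exit_critical() above maintain a plain nesting count, with in_last_critical() distinguishing the outermost region. A minimal sketch of that counter plus an RAII wrapper (the wrapper is an illustration; HotSpot drives the counter directly from the JNI entry points):

  #include <cassert>

  struct CriticalCounter {
    int depth = 0;
    bool in_critical() const      { return depth > 0; }
    bool in_last_critical() const { return depth == 1; }
    void enter() { depth++; }
    void exit()  { depth--; assert(depth >= 0 && "JNI critical nesting problem?"); }
  };

  struct CriticalRegion {                // RAII wrapper, illustration only
    CriticalCounter& c;
    explicit CriticalRegion(CriticalCounter& cc) : c(cc) { c.enter(); }
    ~CriticalRegion() { c.exit(); }
  };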
@@ -1406,15 +1415,15 @@
   char* name() const { return (char*)get_thread_name(); }
   void print_on(outputStream* st) const;
   void print_value();
-  void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
-  void print_thread_state() const                       PRODUCT_RETURN;
+  void print_thread_state_on(outputStream*) const      PRODUCT_RETURN;
+  void print_thread_state() const                      PRODUCT_RETURN;
   void print_on_error(outputStream* st, char* buf, int buflen) const;
   void verify();
   const char* get_thread_name() const;
-private:
+ private:
   // factor out low-level mechanics for use in both normal and error cases
   const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
-public:
+ public:
   const char* get_threadgroup_name() const;
   const char* get_parent_name() const;
 
@@ -1456,20 +1465,20 @@
 
   // Profiling operation (see fprofile.cpp)
  public:
-   bool profile_last_Java_frame(frame* fr);
+  bool profile_last_Java_frame(frame* fr);
 
  private:
-   ThreadProfiler* _thread_profiler;
+  ThreadProfiler* _thread_profiler;
  private:
-   friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
-   friend class FlatProfilerTask;                // uses get_thread_profiler.
-   friend class ThreadProfilerMark;              // uses get_thread_profiler.
-   ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
-   ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
-     ThreadProfiler* result = _thread_profiler;
-     _thread_profiler = tp;
-     return result;
-   }
+  friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
+  friend class FlatProfilerTask;                // uses get_thread_profiler.
+  friend class ThreadProfilerMark;              // uses get_thread_profiler.
+  ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
+  ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
+    ThreadProfiler* result = _thread_profiler;
+    _thread_profiler = tp;
+    return result;
+  }
 
  public:
   // Returns the running thread as a JavaThread
@@ -1692,15 +1701,15 @@
 
 
   // JSR166 per-thread parker
-private:
+ private:
   Parker*    _parker;
-public:
+ public:
   Parker*     parker() { return _parker; }
 
   // Biased locking support
-private:
+ private:
   GrowableArray<MonitorInfo*>* _cached_monitor_info;
-public:
+ public:
   GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
   void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
 
@@ -1708,12 +1717,12 @@
   bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
   bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
   inline void set_done_attaching_via_jni();
-private:
+ private:
   // This field is used to determine if a thread has claimed
   // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
   // otherwise its value is the par_id that has been claimed.
   uint _claimed_par_id;
-public:
+ public:
   uint get_claimed_par_id() { return _claimed_par_id; }
   void set_claimed_par_id(uint id) { _claimed_par_id = id; }
 };
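
The par_id protocol spelled out in the comment above is a sentinel scheme: UINT_MAX means unclaimed, anything else is the claimed id. A tiny sketch (ParIdHolder is an illustrative name):

  #include <climits>

  struct ParIdHolder {                   // illustrative name
    unsigned int claimed = UINT_MAX;     // UINT_MAX == "no par_id claimed"
    bool has_claimed() const     { return claimed != UINT_MAX; }
    void claim(unsigned int id)  { claimed = id; }
    void release()               { claimed = UINT_MAX; }
  };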
@@ -1766,7 +1775,7 @@
   void          set_env(ciEnv* env)              { _env = env; }
 
   BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
-  void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
+  void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; }
 
   // Get/set the thread's logging information
   CompileLog*   log()                            { return _log; }
@@ -1782,9 +1791,9 @@
   void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
 #ifndef PRODUCT
-private:
+ private:
   IdealGraphPrinter *_ideal_graph_printer;
-public:
+ public:
   IdealGraphPrinter *ideal_graph_printer()                       { return _ideal_graph_printer; }
   void set_ideal_graph_printer(IdealGraphPrinter *n)             { _ideal_graph_printer = n; }
 #endif
@@ -1885,13 +1894,13 @@
   // is true, then Threads_lock is grabbed as needed. Otherwise, the
   // VM needs to be at a safepoint.
   static GrowableArray<JavaThread*>* get_pending_threads(int count,
-    address monitor, bool doLock);
+                                                         address monitor, bool doLock);
 
   // Get owning Java thread from the monitor's owner field. If doLock
   // is true, then Threads_lock is grabbed as needed. Otherwise, the
   // VM needs to be at a safepoint.
   static JavaThread *owning_thread_from_monitor_owner(address owner,
-    bool doLock);
+                                                      bool doLock);
 
   // Number of threads on the active threads list
   static int number_of_threads()                 { return _number_of_threads; }
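
Both lookups above follow the doLock convention: take Threads_lock only when the caller is not already at a safepoint. A sketch of that conditional-locking shape, with std::mutex standing in for Threads_lock (with_optional_lock is an illustrative helper, not a HotSpot API):

  #include <mutex>

  static std::mutex threads_lock;        // stand-in for HotSpot's Threads_lock

  template <typename F>
  auto with_optional_lock(bool do_lock, F body) -> decltype(body()) {
    if (do_lock) {
      std::lock_guard<std::mutex> guard(threads_lock);
      return body();
    }
    return body();   // caller guarantees safety, e.g. VM is at a safepoint
  }

  // Usage: with_optional_lock(doLock, [&] { return find_owner(monitor); });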
@@ -1911,9 +1920,9 @@
 };
 
 class SignalHandlerMark: public StackObj {
-private:
+ private:
   Thread* _thread;
-public:
+ public:
   SignalHandlerMark(Thread* t) {
     _thread = t;
     if (_thread) _thread->enter_signal_handler();