8057107: cleanup indent white space issues prior to Contended Locking reorder and cache line bucket
author dcubed
date Wed, 10 Sep 2014 11:48:20 -0600
changeset 26683 a02753d5a0b2
parent 26331 8f17e084029b
child 26684 d1221849ea3d
Reviewed-by: fparain, sspitsyn, coleenp
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/bsd/vm/os_bsd.hpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/linux/vm/os_linux.hpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/solaris/vm/os_solaris.hpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/share/vm/runtime/atomic.hpp
hotspot/src/share/vm/runtime/mutex.cpp
hotspot/src/share/vm/runtime/objectMonitor.cpp
hotspot/src/share/vm/runtime/objectMonitor.hpp
hotspot/src/share/vm/runtime/sharedRuntime.hpp
hotspot/src/share/vm/runtime/synchronizer.cpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
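
The hunks below consistently move to HotSpot's two-space block indentation, access specifiers indented by a single space, and continuation lines aligned under the first argument of a wrapped call. A minimal stand-alone sketch of that style (hypothetical names, not code taken from this patch):

#include <cstdio>

class ExampleCounter {
 private:
  int _value;                          // members at two-space indent
 public:
  ExampleCounter() : _value(0) {}

  bool set_if_equals(int expected, int desired,
                     bool verbose) {   // continuation aligned under 1st arg
    if (_value != expected) {
      return false;                    // two-space indent inside blocks
    }
    if (verbose) {
      ::printf("value %d -> %d\n", _value, desired);
    }
    _value = desired;
    return true;
  }
};

int main() {
  ExampleCounter c;
  return c.set_if_equals(0, 1, true) ? 0 : 1;
}
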
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -260,11 +260,11 @@
   mib[1] = HW_NCPU;
   len = sizeof(cpu_val);
   if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
-       assert(len == sizeof(cpu_val), "unexpected data size");
-       set_processor_count(cpu_val);
+    assert(len == sizeof(cpu_val), "unexpected data size");
+    set_processor_count(cpu_val);
   }
   else {
-       set_processor_count(1);   // fallback
+    set_processor_count(1);   // fallback
   }
 
   /* get physical memory via hw.memsize sysctl (hw.memsize is used
@@ -284,19 +284,19 @@
 
   len = sizeof(mem_val);
   if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
-       assert(len == sizeof(mem_val), "unexpected data size");
-       _physical_memory = mem_val;
+    assert(len == sizeof(mem_val), "unexpected data size");
+    _physical_memory = mem_val;
   } else {
-       _physical_memory = 256*1024*1024;       // fallback (XXXBSD?)
+    _physical_memory = 256*1024*1024;       // fallback (XXXBSD?)
   }
 
 #ifdef __OpenBSD__
   {
-       // limit _physical_memory memory view on OpenBSD since
-       // datasize rlimit restricts us anyway.
-       struct rlimit limits;
-       getrlimit(RLIMIT_DATA, &limits);
-       _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
+    // limit _physical_memory memory view on OpenBSD since
+    // datasize rlimit restricts us anyway.
+    struct rlimit limits;
+    getrlimit(RLIMIT_DATA, &limits);
+    _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
   }
 #endif
 }
@@ -561,14 +561,14 @@
 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 
 bool os::Bsd::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+    return true;
+  else
+    return false;
 }
 
 void os::Bsd::signal_sets_init() {
@@ -596,18 +596,18 @@
   sigaddset(&unblocked_sigs, SR_signum);
 
   if (!ReduceSignalUsage) {
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
@@ -846,9 +846,9 @@
 
   // Aborted due to thread limit being reached
   if (state == ZOMBIE) {
-      thread->set_osthread(NULL);
-      delete osthread;
-      return false;
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
   }
 
   // The thread is returned suspended (in state INITIALIZED),
@@ -868,7 +868,7 @@
 
 bool os::create_attached_thread(JavaThread* thread) {
 #ifdef ASSERT
-    thread->verify_not_published();
+  thread->verify_not_published();
 #endif
 
   // Allocate the OSThread object
@@ -919,7 +919,7 @@
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
-   }
+  }
 
   delete osthread;
 }
@@ -1023,27 +1023,27 @@
 #ifdef __APPLE__
 
 jlong os::javaTimeNanos() {
-    const uint64_t tm = mach_absolute_time();
-    const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
-    const uint64_t prev = Bsd::_max_abstime;
-    if (now <= prev) {
-      return prev;   // same or retrograde time;
-    }
-    const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
-    assert(obsv >= prev, "invariant");   // Monotonicity
-    // If the CAS succeeded then we're done and return "now".
-    // If the CAS failed and the observed value "obsv" is >= now then
-    // we should return "obsv".  If the CAS failed and now > obsv > prv then
-    // some other thread raced this thread and installed a new value, in which case
-    // we could either (a) retry the entire operation, (b) retry trying to install now
-    // or (c) just return obsv.  We use (c).   No loop is required although in some cases
-    // we might discard a higher "now" value in deference to a slightly lower but freshly
-    // installed obsv value.   That's entirely benign -- it admits no new orderings compared
-    // to (a) or (b) -- and greatly reduces coherence traffic.
-    // We might also condition (c) on the magnitude of the delta between obsv and now.
-    // Avoiding excessive CAS operations to hot RW locations is critical.
-    // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
-    return (prev == obsv) ? now : obsv;
+  const uint64_t tm = mach_absolute_time();
+  const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
+  const uint64_t prev = Bsd::_max_abstime;
+  if (now <= prev) {
+    return prev;   // same or retrograde time;
+  }
+  const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
+  assert(obsv >= prev, "invariant");   // Monotonicity
+  // If the CAS succeeded then we're done and return "now".
+  // If the CAS failed and the observed value "obsv" is >= now then
+  // we should return "obsv".  If the CAS failed and now > obsv > prv then
+  // some other thread raced this thread and installed a new value, in which case
+  // we could either (a) retry the entire operation, (b) retry trying to install now
+  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
+  // we might discard a higher "now" value in deference to a slightly lower but freshly
+  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
+  // to (a) or (b) -- and greatly reduces coherence traffic.
+  // We might also condition (c) on the magnitude of the delta between obsv and now.
+  // Avoiding excessive CAS operations to hot RW locations is critical.
+  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
+  return (prev == obsv) ? now : obsv;
 }
 
 #else // __APPLE__
@@ -1307,7 +1307,7 @@
         continue; // skip the empty path values
       }
       snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
-          pelements[i], fname);
+               pelements[i], fname);
       if (file_exists(buffer)) {
         retval = true;
         break;
@@ -1372,14 +1372,14 @@
     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                           buf, buflen, offset, dlinfo.dli_fname)) {
-         return true;
+        return true;
       }
     }
 
     // Handle non-dynamic manually:
     if (dlinfo.dli_fbase != NULL &&
         Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
-                        dlinfo.dli_fbase)) {
+        dlinfo.dli_fbase)) {
       if (!Decoder::demangle(localbuf, buf, buflen)) {
         jio_snprintf(buf, buflen, "%s", localbuf);
       }
@@ -1465,7 +1465,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
@@ -1525,33 +1525,33 @@
   };
 
   #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
+  static  Elf32_Half running_arch_code=EM_386;
   #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
+  static  Elf32_Half running_arch_code=EM_X86_64;
   #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
+  static  Elf32_Half running_arch_code=EM_IA_64;
   #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
   #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
+  static  Elf32_Half running_arch_code=EM_SPARC;
   #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
+  static  Elf32_Half running_arch_code=EM_PPC64;
   #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
+  static  Elf32_Half running_arch_code=EM_PPC;
   #elif  (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
+  static  Elf32_Half running_arch_code=EM_ARM;
   #elif  (defined S390)
-    static  Elf32_Half running_arch_code=EM_S390;
+  static  Elf32_Half running_arch_code=EM_S390;
   #elif  (defined ALPHA)
-    static  Elf32_Half running_arch_code=EM_ALPHA;
+  static  Elf32_Half running_arch_code=EM_ALPHA;
   #elif  (defined MIPSEL)
-    static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
+  static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
   #elif  (defined PARISC)
-    static  Elf32_Half running_arch_code=EM_PARISC;
+  static  Elf32_Half running_arch_code=EM_PARISC;
   #elif  (defined MIPS)
-    static  Elf32_Half running_arch_code=EM_MIPS;
+  static  Elf32_Half running_arch_code=EM_MIPS;
   #elif  (defined M68K)
-    static  Elf32_Half running_arch_code=EM_68K;
+  static  Elf32_Half running_arch_code=EM_68K;
   #else
     #error Method os::dll_load requires that one of following is defined:\
          IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
@@ -1574,7 +1574,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -1596,13 +1596,13 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
@@ -1630,7 +1630,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -1785,8 +1785,8 @@
 
   char dli_fname[MAXPATHLEN];
   bool ret = dll_address_to_library_name(
-                CAST_FROM_FN_PTR(address, os::jvm_path),
-                dli_fname, sizeof(dli_fname), NULL);
+                                         CAST_FROM_FN_PTR(address, os::jvm_path),
+                                         dli_fname, sizeof(dli_fname), NULL);
   assert(ret, "cannot locate libjvm");
   char *rp = NULL;
   if (ret && dli_fname[0] != '\0') {
@@ -1884,12 +1884,12 @@
   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
   // don't want to flood the manager thread with sem_post requests.
   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
-      return;
+    return;
 
   // Ctrl-C is pressed during error reporting, likely because the error
   // handler fails to abort. Let VM die immediately.
   if (sig == SIGINT && is_error_reported()) {
-     os::die();
+    os::die();
   }
 
   os::signal_notify(sig);
@@ -1952,16 +1952,16 @@
 #endif
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    jlong currenttime() const;
-    os_semaphore_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  jlong currenttime() const;
+  os_semaphore_t _semaphore;
 };
 
 Semaphore::Semaphore() : _semaphore(0) {
@@ -1981,9 +1981,9 @@
 }
 
 jlong Semaphore::currenttime() const {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
 }
 
 #ifdef __APPLE__
@@ -2180,7 +2180,7 @@
   }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
-                                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   if (res != (uintptr_t) MAP_FAILED) {
     return true;
   }
@@ -2194,7 +2194,7 @@
 }
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
+                          bool exec) {
   // alignment_hint is ignored on this OS
   return pd_commit_memory(addr, size, exec);
 }
@@ -2262,7 +2262,7 @@
   return ::mprotect(addr, size, PROT_NONE) == 0;
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
-                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res  != (uintptr_t) MAP_FAILED;
 #endif
 }
@@ -2323,7 +2323,7 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                         size_t alignment_hint) {
+                            size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
@@ -2401,24 +2401,24 @@
   // Currently, size is the total size of the heap
   int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
-     // Possible reasons for shmget failure:
-     // 1. shmmax is too small for Java heap.
-     //    > check shmmax value: cat /proc/sys/kernel/shmmax
-     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
-     // 2. not enough large page memory.
-     //    > check available large pages: cat /proc/meminfo
-     //    > increase amount of large pages:
-     //          echo new_value > /proc/sys/vm/nr_hugepages
-     //      Note 1: different Bsd may use different name for this property,
-     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
-     //      Note 2: it's possible there's enough physical memory available but
-     //            they are so fragmented after a long run that they can't
-     //            coalesce into large pages. Try to reserve large pages when
-     //            the system is still "fresh".
-     if (warn_on_failure) {
-       warning("Failed to reserve shared memory (errno = %d).", errno);
-     }
-     return NULL;
+    // Possible reasons for shmget failure:
+    // 1. shmmax is too small for Java heap.
+    //    > check shmmax value: cat /proc/sys/kernel/shmmax
+    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+    // 2. not enough large page memory.
+    //    > check available large pages: cat /proc/meminfo
+    //    > increase amount of large pages:
+    //          echo new_value > /proc/sys/vm/nr_hugepages
+    //      Note 1: different Bsd may use different name for this property,
+    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
+    //      Note 2: it's possible there's enough physical memory available but
+    //            they are so fragmented after a long run that they can't
+    //            coalesce into large pages. Try to reserve large pages when
+    //            the system is still "fresh".
+    if (warn_on_failure) {
+      warning("Failed to reserve shared memory (errno = %d).", errno);
+    }
+    return NULL;
   }
 
   // attach to the region
@@ -2432,10 +2432,10 @@
   shmctl(shmid, IPC_RMID, NULL);
 
   if ((intptr_t)addr == -1) {
-     if (warn_on_failure) {
-       warning("Failed to attach shared memory (errno = %d).", err);
-     }
-     return NULL;
+    if (warn_on_failure) {
+      warning("Failed to attach shared memory (errno = %d).", err);
+    }
+    return NULL;
   }
 
   // The memory is committed
@@ -2506,12 +2506,12 @@
   // if kernel honors the hint then we can return immediately.
   char * addr = anon_mmap(requested_addr, bytes, false);
   if (addr == requested_addr) {
-     return requested_addr;
+    return requested_addr;
   }
 
   if (addr != NULL) {
-     // mmap() is successful but it fails to reserve at the requested address
-     anon_munmap(addr, bytes);
+    // mmap() is successful but it fails to reserve at the requested address
+    anon_munmap(addr, bytes);
   }
 
   int i;
@@ -2839,12 +2839,12 @@
   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
     int sig = ::strtol(s, 0, 10);
     if (sig > 0 || sig < NSIG) {
-        SR_signum = sig;
+      SR_signum = sig;
     }
   }
 
   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
-        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
+         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
 
   sigemptyset(&SR_sigset);
   sigaddset(&SR_sigset, SR_signum);
@@ -2977,7 +2977,7 @@
 //
 extern "C" JNIEXPORT int
 JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
-                        void* ucontext, int abort_if_unrecognized);
+                      void* ucontext, int abort_if_unrecognized);
 
 void signalHandler(int sig, siginfo_t* info, void* uc) {
   assert(info != NULL && uc != NULL, "it must be old kernel");
@@ -3168,12 +3168,12 @@
     signal_setting_t begin_signal_setting = NULL;
     signal_setting_t end_signal_setting = NULL;
     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
     if (begin_signal_setting != NULL) {
       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
-                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
       libjsig_is_loaded = true;
       assert(UseSignalChaining, "should enable signal-chaining");
     }
@@ -3203,10 +3203,10 @@
     // exception handling, while leaving the standard BSD signal handlers functional.
     kern_return_t kr;
     kr = task_set_exception_ports(mach_task_self(),
-        EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
-        MACH_PORT_NULL,
-        EXCEPTION_STATE_IDENTITY,
-        MACHINE_THREAD_STATE);
+                                  EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
+                                  MACH_PORT_NULL,
+                                  EXCEPTION_STATE_IDENTITY,
+                                  MACHINE_THREAD_STATE);
 
     assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
 #endif
@@ -3302,7 +3302,7 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
+      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
     // It is our signal handler
     // check for flags, reset system-used one!
     if ((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
@@ -3542,22 +3542,22 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
+                                    (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
       threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
-        tty->print_cr("\nThe stack size specified is too small, "
-                      "Specify at least %dk",
-                      os::Bsd::min_stack_allowed/ K);
-        return JNI_ERR;
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  os::Bsd::min_stack_allowed/ K);
+    return JNI_ERR;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
@@ -3670,12 +3670,12 @@
 
 ///
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -3722,7 +3722,7 @@
     st->print(PTR_FORMAT ": ", addr);
     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#x", dlinfo.dli_sname,
-                 addr - (intptr_t)dlinfo.dli_saddr);
+                addr - (intptr_t)dlinfo.dli_saddr);
     } else if (dlinfo.dli_fbase != NULL) {
       st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
     } else {
@@ -3892,11 +3892,11 @@
      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
      */
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
-    }
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1)
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  }
 #endif
 
   if (o_delete != 0) {
@@ -3960,23 +3960,23 @@
 }
 
 int os::socket_available(int fd, jint *pbytes) {
-   if (fd < 0)
-     return OS_OK;
-
-   int ret;
-
-   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
-
-   //%% note ioctl can return 0 when successful, JVM_SocketAvailable
-   // is expected to return 0 on failure and 1 on success to the jdk.
-
-   return (ret == OS_ERR) ? 0 : 1;
+  if (fd < 0)
+    return OS_OK;
+
+  int ret;
+
+  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
+
+  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
+  // is expected to return 0 on failure and 1 on success to the jdk.
+
+  return (ret == OS_ERR) ? 0 : 1;
 }
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags;
 
@@ -4007,8 +4007,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -4127,7 +4127,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -4223,28 +4223,28 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        status = pthread_cond_wait(_cond, _mutex);
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        if (status == ETIMEDOUT) { status = EINTR; }
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
+    // Do this the hard way by blocking ...
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      status = pthread_cond_wait(_cond, _mutex);
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      if (status == ETIMEDOUT) { status = EINTR; }
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
 
     _Event = 0;
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
@@ -4257,8 +4257,8 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -4302,7 +4302,7 @@
   }
   --_nParked;
   if (_Event >= 0) {
-     ret = OS_OK;
+    ret = OS_OK;
   }
   _Event = 0;
   status = pthread_mutex_unlock(_mutex);
@@ -4532,17 +4532,17 @@
   const int s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal(_cond);
-        assert(status == 0, "invariant");
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-     } else {
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-        status = pthread_cond_signal(_cond);
-        assert(status == 0, "invariant");
-     }
+    if (WorkAroundNPTLTimedWaitHang) {
+      status = pthread_cond_signal(_cond);
+      assert(status == 0, "invariant");
+      status = pthread_mutex_unlock(_mutex);
+      assert(status == 0, "invariant");
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert(status == 0, "invariant");
+      status = pthread_cond_signal(_cond);
+      assert(status == 0, "invariant");
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert(status == 0, "invariant");
@@ -4600,26 +4600,26 @@
     // Wait for the child process to exit.  This returns immediately if
     // the child has already exited. */
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
@@ -4634,40 +4634,40 @@
 //
 bool os::is_headless_jre() {
 #ifdef __APPLE__
-    // We no longer build headless-only on Mac OS X
-    return false;
+  // We no longer build headless-only on Mac OS X
+  return false;
 #else
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt" JNI_LIB_SUFFIX;
-    const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt" JNI_LIB_SUFFIX;
+  const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 #endif
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -108,7 +108,7 @@
   // that file provides extensions to the os class and not the
   // Bsd class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_bsd_signal, harmlessly.
@@ -147,7 +147,7 @@
   // BsdThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-private:
+ private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   typedef int (*numa_max_node_func_t)(void);
@@ -170,7 +170,7 @@
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
-public:
+ public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -190,55 +190,55 @@
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    volatile int _nParked;
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
-    double PostPad[2];
-    Thread * _Assoc;
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  volatile int _nParked;
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
+  double PostPad[2];
+  Thread * _Assoc;
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformEvent() {
-      int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _Assoc   = NULL;
-    }
+ public:
+  PlatformEvent() {
+    int status;
+    status = pthread_cond_init (_cond, NULL);
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init (_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _Assoc   = NULL;
+  }
 
-    // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    void unpark();
-    int  park(jlong millis);
-    void SetAssociation(Thread * a) { _Assoc = a; }
+  // Use caution with reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  void unpark();
+  int  park(jlong millis);
+  void SetAssociation(Thread * a) { _Assoc = a; }
 };
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
+ protected:
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = pthread_cond_init (_cond, NULL);
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init (_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+  }
 };
 
 #endif // OS_BSD_VM_OS_BSD_HPP
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -290,10 +290,10 @@
 pid_t os::Linux::gettid() {
   int rslt = syscall(SYS_gettid);
   if (rslt == -1) {
-     // old kernel, no NPTL support
-     return getpid();
+    // old kernel, no NPTL support
+    return getpid();
   } else {
-     return (pid_t)rslt;
+    return (pid_t)rslt;
   }
 }
 
@@ -465,14 +465,14 @@
 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 
 bool os::Linux::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+    return true;
+  else
+    return false;
 }
 
 void os::Linux::signal_sets_init() {
@@ -503,18 +503,18 @@
   sigaddset(&unblocked_sigs, SR_signum);
 
   if (!ReduceSignalUsage) {
-   if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
@@ -583,50 +583,50 @@
 
   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n, mtInternal);
-     confstr(_CS_GNU_LIBC_VERSION, str, n);
-     os::Linux::set_glibc_version(str);
+    char *str = (char *)malloc(n, mtInternal);
+    confstr(_CS_GNU_LIBC_VERSION, str, n);
+    os::Linux::set_glibc_version(str);
   } else {
-     // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
-     static char _gnu_libc_version[32];
-     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
-              "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
-     os::Linux::set_glibc_version(_gnu_libc_version);
+    // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
+    static char _gnu_libc_version[32];
+    jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
+                 "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
+    os::Linux::set_glibc_version(_gnu_libc_version);
   }
 
   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n, mtInternal);
-     confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
-     // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
-     // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
-     // is the case. LinuxThreads has a hard limit on max number of threads.
-     // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-     // On the other hand, NPTL does not have such a limit, sysconf()
-     // will return -1 and errno is not changed. Check if it is really NPTL.
-     if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
-         strstr(str, "NPTL") &&
-         sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-       free(str);
-       os::Linux::set_libpthread_version("linuxthreads");
-     } else {
-       os::Linux::set_libpthread_version(str);
-     }
+    char *str = (char *)malloc(n, mtInternal);
+    confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
+    // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
+    // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
+    // is the case. LinuxThreads has a hard limit on max number of threads.
+    // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
+    // On the other hand, NPTL does not have such a limit, sysconf()
+    // will return -1 and errno is not changed. Check if it is really NPTL.
+    if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
+        strstr(str, "NPTL") &&
+        sysconf(_SC_THREAD_THREADS_MAX) > 0) {
+      free(str);
+      os::Linux::set_libpthread_version("linuxthreads");
+    } else {
+      os::Linux::set_libpthread_version(str);
+    }
   } else {
     // glibc before 2.3.2 only has LinuxThreads.
     os::Linux::set_libpthread_version("linuxthreads");
   }
 
   if (strstr(libpthread_version(), "NPTL")) {
-     os::Linux::set_is_NPTL();
+    os::Linux::set_is_NPTL();
   } else {
-     os::Linux::set_is_LinuxThreads();
+    os::Linux::set_is_LinuxThreads();
   }
 
   // LinuxThreads have two flavors: floating-stack mode, which allows variable
   // stack size; and fixed-stack mode. NPTL is always floating-stack.
   if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
-     os::Linux::set_is_floating_stack();
+    os::Linux::set_is_floating_stack();
   }
 }
 
@@ -935,9 +935,9 @@
 
   // Aborted due to thread limit being reached
   if (state == ZOMBIE) {
-      thread->set_osthread(NULL);
-      delete osthread;
-      return false;
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
   }
 
   // The thread is returned suspended (in state INITIALIZED),
@@ -957,7 +957,7 @@
 
 bool os::create_attached_thread(JavaThread* thread) {
 #ifdef ASSERT
-    thread->verify_not_published();
+  thread->verify_not_published();
 #endif
 
   // Allocate the OSThread object
@@ -1029,7 +1029,7 @@
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
-   }
+  }
 
   delete osthread;
 }
@@ -1085,7 +1085,7 @@
          "os::init did not locate initial thread's stack region");
   if ((address)&dummy >= initial_thread_stack_bottom() &&
       (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
-       return true;
+    return true;
   else return false;
 }
 
@@ -1097,10 +1097,10 @@
     while (!feof(fp)) {
       if (fscanf(fp, "%p-%p", &low, &high) == 2) {
         if (low <= addr && addr < high) {
-           if (vma_low)  *vma_low  = low;
-           if (vma_high) *vma_high = high;
-           fclose(fp);
-           return true;
+          if (vma_low)  *vma_low  = low;
+          if (vma_high) *vma_high = high;
+          fclose(fp);
+          return true;
         }
       }
       for (;;) {
@@ -1137,7 +1137,7 @@
   // FIXME: alt signal stack is gone, maybe we can relax this constraint?
   // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
   if (stack_size > 2 * K * K IA64_ONLY(*2))
-      stack_size = 2 * K * K IA64_ONLY(*2);
+    stack_size = 2 * K * K IA64_ONLY(*2);
   // Try to figure out where the stack base (top) is. This is harder.
   //
   // When an application is started, glibc saves the initial stack pointer in
@@ -1224,43 +1224,43 @@
         /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
         /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
         i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
-             &state,          /* 3  %c  */
-             &ppid,           /* 4  %d  */
-             &pgrp,           /* 5  %d  */
-             &session,        /* 6  %d  */
-             &nr,             /* 7  %d  */
-             &tpgrp,          /* 8  %d  */
-             &flags,          /* 9  %lu  */
-             &minflt,         /* 10 %lu  */
-             &cminflt,        /* 11 %lu  */
-             &majflt,         /* 12 %lu  */
-             &cmajflt,        /* 13 %lu  */
-             &utime,          /* 14 %lu  */
-             &stime,          /* 15 %lu  */
-             &cutime,         /* 16 %ld  */
-             &cstime,         /* 17 %ld  */
-             &prio,           /* 18 %ld  */
-             &nice,           /* 19 %ld  */
-             &junk,           /* 20 %ld  */
-             &it_real,        /* 21 %ld  */
-             &start,          /* 22 UINTX_FORMAT */
-             &vsize,          /* 23 UINTX_FORMAT */
-             &rss,            /* 24 INTX_FORMAT  */
-             &rsslim,         /* 25 UINTX_FORMAT */
-             &scodes,         /* 26 UINTX_FORMAT */
-             &ecode,          /* 27 UINTX_FORMAT */
-             &stack_start);   /* 28 UINTX_FORMAT */
+                   &state,          /* 3  %c  */
+                   &ppid,           /* 4  %d  */
+                   &pgrp,           /* 5  %d  */
+                   &session,        /* 6  %d  */
+                   &nr,             /* 7  %d  */
+                   &tpgrp,          /* 8  %d  */
+                   &flags,          /* 9  %lu  */
+                   &minflt,         /* 10 %lu  */
+                   &cminflt,        /* 11 %lu  */
+                   &majflt,         /* 12 %lu  */
+                   &cmajflt,        /* 13 %lu  */
+                   &utime,          /* 14 %lu  */
+                   &stime,          /* 15 %lu  */
+                   &cutime,         /* 16 %ld  */
+                   &cstime,         /* 17 %ld  */
+                   &prio,           /* 18 %ld  */
+                   &nice,           /* 19 %ld  */
+                   &junk,           /* 20 %ld  */
+                   &it_real,        /* 21 %ld  */
+                   &start,          /* 22 UINTX_FORMAT */
+                   &vsize,          /* 23 UINTX_FORMAT */
+                   &rss,            /* 24 INTX_FORMAT  */
+                   &rsslim,         /* 25 UINTX_FORMAT */
+                   &scodes,         /* 26 UINTX_FORMAT */
+                   &ecode,          /* 27 UINTX_FORMAT */
+                   &stack_start);   /* 28 UINTX_FORMAT */
       }
 
 #undef _UFM
 #undef _DFM
 
       if (i != 28 - 2) {
-         assert(false, "Bad conversion from /proc/self/stat");
-         // product mode - assume we are the initial thread, good luck in the
-         // embedded case.
-         warning("Can't detect initial thread stack location - bad conversion");
-         stack_start = (uintptr_t) &rlim;
+        assert(false, "Bad conversion from /proc/self/stat");
+        // product mode - assume we are the initial thread, good luck in the
+        // embedded case.
+        warning("Can't detect initial thread stack location - bad conversion");
+        stack_start = (uintptr_t) &rlim;
       }
     } else {
       // For some reason we can't open /proc/self/stat (for example, running on
@@ -1298,9 +1298,9 @@
   stack_top = align_size_up(stack_top, page_size());
 
   if (max_size && stack_size > max_size) {
-     _initial_thread_stack_size = max_size;
+    _initial_thread_stack_size = max_size;
   } else {
-     _initial_thread_stack_size = stack_size;
+    _initial_thread_stack_size = stack_size;
   }
 
   _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
@@ -1423,8 +1423,8 @@
   // better than 1 sec. This is extra check for reliability.
 
   if (pthread_getcpuclockid_func &&
-     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
-     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
+      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
+      sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
 
     _supports_fast_thread_cpu_time = true;
     _pthread_getcpuclockid = pthread_getcpuclockid_func;
@@ -1769,9 +1769,9 @@
   int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
 
   if (rslt) {
-     // buf already contains library name
-     if (offset) *offset = addr - data.base;
-     return true;
+    // buf already contains library name
+    if (offset) *offset = addr - data.base;
+    return true;
   }
   if (dladdr((void*)addr, &dlinfo) != 0) {
     if (dlinfo.dli_fname != NULL) {
@@ -1788,9 +1788,9 @@
   return false;
 }
 
-  // Loads .dll/.so and
-  // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+// Loads .dll/.so and
+// in case of error it checks if .dll/.so was built for the
+// same architecture as Hotspot is running on
 
 
 // Remember the stack's state. The Linux dynamic linker will change
@@ -1905,7 +1905,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
@@ -1949,33 +1949,33 @@
   };
 
   #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
+  static  Elf32_Half running_arch_code=EM_386;
   #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
+  static  Elf32_Half running_arch_code=EM_X86_64;
   #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
+  static  Elf32_Half running_arch_code=EM_IA_64;
   #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
   #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
+  static  Elf32_Half running_arch_code=EM_SPARC;
   #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
+  static  Elf32_Half running_arch_code=EM_PPC64;
   #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
+  static  Elf32_Half running_arch_code=EM_PPC;
   #elif  (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
+  static  Elf32_Half running_arch_code=EM_ARM;
   #elif  (defined S390)
-    static  Elf32_Half running_arch_code=EM_S390;
+  static  Elf32_Half running_arch_code=EM_S390;
   #elif  (defined ALPHA)
-    static  Elf32_Half running_arch_code=EM_ALPHA;
+  static  Elf32_Half running_arch_code=EM_ALPHA;
   #elif  (defined MIPSEL)
-    static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
+  static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
   #elif  (defined PARISC)
-    static  Elf32_Half running_arch_code=EM_PARISC;
+  static  Elf32_Half running_arch_code=EM_PARISC;
   #elif  (defined MIPS)
-    static  Elf32_Half running_arch_code=EM_MIPS;
+  static  Elf32_Half running_arch_code=EM_MIPS;
   #elif  (defined M68K)
-    static  Elf32_Half running_arch_code=EM_68K;
+  static  Elf32_Half running_arch_code=EM_68K;
   #else
     #error Method os::dll_load requires that one of following is defined:\
          IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
@@ -1998,7 +1998,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -2020,13 +2020,13 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
@@ -2093,7 +2093,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -2108,16 +2108,16 @@
 }
 
 void os::print_dll_info(outputStream *st) {
-   st->print_cr("Dynamic libraries:");
-
-   char fname[32];
-   pid_t pid = os::Linux::gettid();
-
-   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
-
-   if (!_print_ascii_file(fname, st)) {
-     st->print("Can not get library information for pid = %d\n", pid);
-   }
+  st->print_cr("Dynamic libraries:");
+
+  char fname[32];
+  pid_t pid = os::Linux::gettid();
+
+  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
+
+  if (!_print_ascii_file(fname, st)) {
+    st->print("Can not get library information for pid = %d\n", pid);
+  }
 }
 
 void os::print_os_info_brief(outputStream* st) {
@@ -2173,28 +2173,28 @@
 // an informative string like "6.0.6" or "wheezy/sid". Because of this
 // "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-   if (!_print_ascii_file("/etc/oracle-release", st) &&
-       !_print_ascii_file("/etc/mandriva-release", st) &&
-       !_print_ascii_file("/etc/mandrake-release", st) &&
-       !_print_ascii_file("/etc/sun-release", st) &&
-       !_print_ascii_file("/etc/redhat-release", st) &&
-       !_print_ascii_file("/etc/lsb-release", st) &&
-       !_print_ascii_file("/etc/SuSE-release", st) &&
-       !_print_ascii_file("/etc/turbolinux-release", st) &&
-       !_print_ascii_file("/etc/gentoo-release", st) &&
-       !_print_ascii_file("/etc/ltib-release", st) &&
-       !_print_ascii_file("/etc/angstrom-version", st) &&
-       !_print_ascii_file("/etc/system-release", st) &&
-       !_print_ascii_file("/etc/os-release", st)) {
-
-       if (file_exists("/etc/debian_version")) {
-         st->print("Debian ");
-         _print_ascii_file("/etc/debian_version", st);
-       } else {
-         st->print("Linux");
-       }
-   }
-   st->cr();
+  if (!_print_ascii_file("/etc/oracle-release", st) &&
+      !_print_ascii_file("/etc/mandriva-release", st) &&
+      !_print_ascii_file("/etc/mandrake-release", st) &&
+      !_print_ascii_file("/etc/sun-release", st) &&
+      !_print_ascii_file("/etc/redhat-release", st) &&
+      !_print_ascii_file("/etc/lsb-release", st) &&
+      !_print_ascii_file("/etc/SuSE-release", st) &&
+      !_print_ascii_file("/etc/turbolinux-release", st) &&
+      !_print_ascii_file("/etc/gentoo-release", st) &&
+      !_print_ascii_file("/etc/ltib-release", st) &&
+      !_print_ascii_file("/etc/angstrom-version", st) &&
+      !_print_ascii_file("/etc/system-release", st) &&
+      !_print_ascii_file("/etc/os-release", st)) {
+
+    if (file_exists("/etc/debian_version")) {
+      st->print("Debian ");
+      _print_ascii_file("/etc/debian_version", st);
+    } else {
+      st->print("Linux");
+    }
+  }
+  st->cr();
 }
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -2203,15 +2203,15 @@
   st->print("%s ", os::Linux::glibc_version());
   st->print("%s ", os::Linux::libpthread_version());
   if (os::Linux::is_LinuxThreads()) {
-     st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
+    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
   }
   st->cr();
 }
 
 void os::Linux::print_full_memory_info(outputStream* st) {
-   st->print("\n/proc/meminfo:\n");
-   _print_ascii_file("/proc/meminfo", st);
-   st->cr();
+  st->print("\n/proc/meminfo:\n");
+  _print_ascii_file("/proc/meminfo", st);
+  st->cr();
 }
 
 void os::print_memory_info(outputStream* st) {
@@ -2301,8 +2301,8 @@
 
   char dli_fname[MAXPATHLEN];
   bool ret = dll_address_to_library_name(
-                CAST_FROM_FN_PTR(address, os::jvm_path),
-                dli_fname, sizeof(dli_fname), NULL);
+                                         CAST_FROM_FN_PTR(address, os::jvm_path),
+                                         dli_fname, sizeof(dli_fname), NULL);
   assert(ret, "cannot locate libjvm");
   char *rp = NULL;
   if (ret && dli_fname[0] != '\0') {
@@ -2386,12 +2386,12 @@
   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
   // don't want to flood the manager thread with sem_post requests.
   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
-      return;
+    return;
 
   // Ctrl-C is pressed during error reporting, likely because the error
   // handler fails to abort. Let VM die immediately.
   if (sig == SIGINT && is_error_reported()) {
-     os::die();
+    os::die();
   }
 
   os::signal_notify(sig);
@@ -2402,15 +2402,15 @@
 }
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    sem_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  sem_t _semaphore;
 };
 
 Semaphore::Semaphore() {
@@ -2661,7 +2661,7 @@
 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
-                                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   if (res != (uintptr_t) MAP_FAILED) {
     if (UseNUMAInterleaving) {
       numa_make_global(addr, size);
@@ -2866,9 +2866,9 @@
       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
-                                            libnuma_dlsym(handle, "numa_interleave_memory")));
+                                                libnuma_dlsym(handle, "numa_interleave_memory")));
       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
-                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
+                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
 
 
       if (numa_available() != -1) {
@@ -2940,7 +2940,7 @@
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
-                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res  != (uintptr_t) MAP_FAILED;
 }
 
@@ -3031,8 +3031,8 @@
     if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
       // Fallback to slow path on all errors, including EAGAIN
       stack_extent = (uintptr_t) get_stack_commited_bottom(
-                                    os::Linux::initial_thread_stack_bottom(),
-                                    (size_t)addr - stack_extent);
+                                                           os::Linux::initial_thread_stack_bottom(),
+                                                           (size_t)addr - stack_extent);
     }
 
     if (stack_extent < (uintptr_t)addr) {
@@ -3105,7 +3105,7 @@
 }
 
 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                         size_t alignment_hint) {
+                            size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
@@ -3296,8 +3296,8 @@
 
   if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
     warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
-        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
-        proper_unit_for_byte_size(large_page_size));
+            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+            proper_unit_for_byte_size(large_page_size));
   }
 
   return large_page_size;
@@ -3404,25 +3404,25 @@
   // Currently, size is the total size of the heap
   int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
-     // Possible reasons for shmget failure:
-     // 1. shmmax is too small for Java heap.
-     //    > check shmmax value: cat /proc/sys/kernel/shmmax
-     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
-     // 2. not enough large page memory.
-     //    > check available large pages: cat /proc/meminfo
-     //    > increase amount of large pages:
-     //          echo new_value > /proc/sys/vm/nr_hugepages
-     //      Note 1: different Linux may use different name for this property,
-     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
-     //      Note 2: it's possible there's enough physical memory available but
-     //            they are so fragmented after a long run that they can't
-     //            coalesce into large pages. Try to reserve large pages when
-     //            the system is still "fresh".
-     if (warn_on_failure) {
-       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-       warning("%s", msg);
-     }
-     return NULL;
+    // Possible reasons for shmget failure:
+    // 1. shmmax is too small for Java heap.
+    //    > check shmmax value: cat /proc/sys/kernel/shmmax
+    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+    // 2. not enough large page memory.
+    //    > check available large pages: cat /proc/meminfo
+    //    > increase amount of large pages:
+    //          echo new_value > /proc/sys/vm/nr_hugepages
+    //      Note 1: different Linux may use different name for this property,
+    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
+    //      Note 2: it's possible there's enough physical memory available but
+    //            they are so fragmented after a long run that they can't
+    //            coalesce into large pages. Try to reserve large pages when
+    //            the system is still "fresh".
+    if (warn_on_failure) {
+      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
+      warning("%s", msg);
+    }
+    return NULL;
   }
 
   // attach to the region
@@ -3436,11 +3436,11 @@
   shmctl(shmid, IPC_RMID, NULL);
 
   if ((intptr_t)addr == -1) {
-     if (warn_on_failure) {
-       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-       warning("%s", msg);
-     }
-     return NULL;
+    if (warn_on_failure) {
+      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
+      warning("%s", msg);
+    }
+    return NULL;
   }
 
   return addr;
@@ -3457,7 +3457,7 @@
   if (warn_on_failure) {
     char msg[128];
     jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
-        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
     warning("%s", msg);
   }
 }
@@ -3562,9 +3562,9 @@
   }
 
   if (lp_end != end) {
-      result = ::mmap(lp_end, end - lp_end, prot,
-                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-                      -1, 0);
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
     if (result == MAP_FAILED) {
       ::munmap(start, lp_end - start);
       return NULL;
@@ -3698,12 +3698,12 @@
   // if kernel honors the hint then we can return immediately.
   char * addr = anon_mmap(requested_addr, bytes, false);
   if (addr == requested_addr) {
-     return requested_addr;
+    return requested_addr;
   }
 
   if (addr != NULL) {
-     // mmap() is successful but it fails to reserve at the requested address
-     anon_munmap(addr, bytes);
+    // mmap() is successful but it fails to reserve at the requested address
+    anon_munmap(addr, bytes);
   }
 
   int i;
@@ -3988,12 +3988,12 @@
   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
     int sig = ::strtol(s, 0, 10);
     if (sig > 0 || sig < _NSIG) {
-        SR_signum = sig;
+      SR_signum = sig;
     }
   }
 
   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
-        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
+         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
 
   sigemptyset(&SR_sigset);
   sigaddset(&SR_sigset, SR_signum);
@@ -4304,12 +4304,12 @@
     signal_setting_t begin_signal_setting = NULL;
     signal_setting_t end_signal_setting = NULL;
     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
     if (begin_signal_setting != NULL) {
       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
-                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
       libjsig_is_loaded = true;
       assert(UseSignalChaining, "should enable signal-chaining");
     }
@@ -4434,7 +4434,7 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
+      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
     // It is our signal handler
     // check for flags, reset system-used one!
     if ((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
@@ -4698,22 +4698,22 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
-                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
+                                      (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+                                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
       threadStackSizeInBytes < os::Linux::min_stack_allowed) {
-        tty->print_cr("\nThe stack size specified is too small, "
-                      "Specify at least %dk",
-                      os::Linux::min_stack_allowed/ K);
-        return JNI_ERR;
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  os::Linux::min_stack_allowed/ K);
+    return JNI_ERR;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
 
@@ -4723,9 +4723,9 @@
 
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
-     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
-          Linux::glibc_version(), Linux::libpthread_version(),
-          Linux::is_floating_stack() ? "floating stack" : "fixed stack");
+    tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
+                  Linux::glibc_version(), Linux::libpthread_version(),
+                  Linux::is_floating_stack() ? "floating stack" : "fixed stack");
   }
 
   if (UseNUMA) {
@@ -4865,12 +4865,12 @@
 }
 
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -4904,17 +4904,17 @@
 
 int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
 {
-   if (is_NPTL()) {
-      return pthread_cond_timedwait(_cond, _mutex, _abstime);
-   } else {
-      // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
-      // word back to default 64bit precision if condvar is signaled. Java
-      // wants 53bit precision.  Save and restore current value.
-      int fpu = get_fpu_control_word();
-      int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-      set_fpu_control_word(fpu);
-      return status;
-   }
+  if (is_NPTL()) {
+    return pthread_cond_timedwait(_cond, _mutex, _abstime);
+  } else {
+    // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
+    // word back to default 64bit precision if condvar is signaled. Java
+    // wants 53bit precision.  Save and restore current value.
+    int fpu = get_fpu_control_word();
+    int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
+    set_fpu_control_word(fpu);
+    return status;
+  }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -4927,7 +4927,7 @@
     st->print(PTR_FORMAT ": ", addr);
     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#x", dlinfo.dli_sname,
-                 addr - (intptr_t)dlinfo.dli_saddr);
+                addr - (intptr_t)dlinfo.dli_saddr);
     } else if (dlinfo.dli_fbase != NULL) {
       st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
     } else {
@@ -5096,11 +5096,11 @@
      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
      */
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
-    }
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1)
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  }
 #endif
 
   if (o_delete != 0) {
@@ -5174,8 +5174,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags = MAP_PRIVATE;
 
@@ -5204,8 +5204,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -5360,7 +5360,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -5471,28 +5471,28 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        status = pthread_cond_wait(_cond, _mutex);
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        if (status == ETIME) { status = EINTR; }
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
+    // Do this the hard way by blocking ...
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      status = pthread_cond_wait(_cond, _mutex);
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      if (status == ETIME) { status = EINTR; }
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
 
     _Event = 0;
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
@@ -5505,8 +5505,8 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -5550,7 +5550,7 @@
   }
   --_nParked;
   if (_Event >= 0) {
-     ret = OS_OK;
+    ret = OS_OK;
   }
   _Event = 0;
   status = pthread_mutex_unlock(_mutex);
@@ -5849,7 +5849,7 @@
   // On IA64 there's no fork syscall, we have to use fork() and hope for
   // the best...
   pid_t pid = NOT_IA64(syscall(__NR_fork);)
-              IA64_ONLY(fork();)
+  IA64_ONLY(fork();)
 
   if (pid < 0) {
     // fork failed
@@ -5880,26 +5880,26 @@
     // Wait for the child process to exit.  This returns immediately if
     // the child has already exited. */
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
@@ -5913,37 +5913,37 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt.so";
-    const char *new_xawtstr = "/libawt_xawt.so";
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *new_xawtstr = "/libawt_xawt.so";
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 }
 
 // Get the default path to the core file
@@ -6025,7 +6025,7 @@
   int    fd;
   fd = open ("/dev/mem_notify", O_RDONLY, 0);
   if (fd < 0) {
-      return;
+    return;
   }
 
   if (memnotify_thread() == NULL) {
@@ -6089,11 +6089,11 @@
 
   static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
     if (!UseHugeTLBFS) {
-        return;
+      return;
     }
 
     test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
-        size, alignment);
+             size, alignment);
 
     assert(size >= os::large_page_size(), "Incorrect input to test");
 
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -151,7 +151,7 @@
   // that file provides extensions to the os class and not the
   // Linux class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_linux_signal, harmlessly.
@@ -222,10 +222,10 @@
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
   // pthread_cond clock suppport
-  private:
+ private:
   static pthread_condattr_t _condattr[1];
 
-  public:
+ public:
   static pthread_condattr_t* condAttr() { return _condattr; }
 
   // Stack repair handling
@@ -235,7 +235,7 @@
   // LinuxThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-private:
+ private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   typedef int (*numa_max_node_func_t)(void);
@@ -262,7 +262,7 @@
   static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
-public:
+ public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -287,63 +287,63 @@
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    volatile int _nParked;
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[1];
-    double PostPad[2];
-    Thread * _Assoc;
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  volatile int _nParked;
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[1];
+  double PostPad[2];
+  Thread * _Assoc;
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformEvent() {
-      int status;
-      status = pthread_cond_init (_cond, os::Linux::condAttr());
-      assert_status(status == 0, status, "cond_init");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _Assoc   = NULL;
-    }
+ public:
+  PlatformEvent() {
+    int status;
+    status = pthread_cond_init (_cond, os::Linux::condAttr());
+    assert_status(status == 0, status, "cond_init");
+    status = pthread_mutex_init (_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _Assoc   = NULL;
+  }
 
-    // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    void unpark();
-    int  park(jlong millis); // relative timed-wait only
-    void SetAssociation(Thread * a) { _Assoc = a; }
+  // Use caution with reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  void unpark();
+  int  park(jlong millis); // relative timed-wait only
+  void SetAssociation(Thread * a) { _Assoc = a; }
 };
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    enum {
-        REL_INDEX = 0,
-        ABS_INDEX = 1
-    };
-    int _cur_index;  // which cond is in use: -1, 0, 1
-    pthread_mutex_t _mutex[1];
-    pthread_cond_t  _cond[2]; // one for relative times and one for abs.
+ protected:
+  enum {
+    REL_INDEX = 0,
+    ABS_INDEX = 1
+  };
+  int _cur_index;  // which cond is in use: -1, 0, 1
+  pthread_mutex_t _mutex[1];
+  pthread_cond_t  _cond[2]; // one for relative times and one for abs.
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
-      assert_status(status == 0, status, "cond_init rel");
-      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
-      assert_status(status == 0, status, "cond_init abs");
-      status = pthread_mutex_init (_mutex, NULL);
-      assert_status(status == 0, status, "mutex_init");
-      _cur_index = -1; // mark as unused
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+    assert_status(status == 0, status, "cond_init rel");
+    status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+    assert_status(status == 0, status, "cond_init abs");
+    status = pthread_mutex_init (_mutex, NULL);
+    assert_status(status == 0, status, "mutex_init");
+    _cur_index = -1; // mark as unused
+  }
 };
 
 #endif // OS_LINUX_VM_OS_LINUX_HPP
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -210,8 +210,8 @@
     address sp = os::current_stack_pointer();
     guarantee(thread->_stack_base == NULL ||
               (sp <= thread->_stack_base &&
-                 sp >= thread->_stack_base - thread->_stack_size) ||
-               is_error_reported(),
+              sp >= thread->_stack_base - thread->_stack_size) ||
+              is_error_reported(),
               "sp must be inside of selected thread stack");
 
     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
@@ -332,7 +332,7 @@
 
 static int _processors_online = 0;
 
-         jint os::Solaris::_os_thread_limit = 0;
+jint os::Solaris::_os_thread_limit = 0;
 volatile jint os::Solaris::_os_thread_count = 0;
 
 julong os::available_memory() {
@@ -346,7 +346,7 @@
 julong os::Solaris::_physical_memory = 0;
 
 julong os::physical_memory() {
-   return Solaris::physical_memory();
+  return Solaris::physical_memory();
 }
 
 static hrtime_t first_hrtime = 0;
@@ -432,14 +432,14 @@
     next += 1;
   }
   if (found < *id_length) {
-      // The loop above didn't identify the expected number of processors.
-      // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
-      // and re-running the loop, above, but there's no guarantee of progress
-      // if the system configuration is in flux.  Instead, we just return what
-      // we've got.  Note that in the worst case find_processors_online() could
-      // return an empty set.  (As a fall-back in the case of the empty set we
-      // could just return the ID of the current processor).
-      *id_length = found;
+    // The loop above didn't identify the expected number of processors.
+    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
+    // and re-running the loop, above, but there's no guarantee of progress
+    // if the system configuration is in flux.  Instead, we just return what
+    // we've got.  Note that in the worst case find_processors_online() could
+    // return an empty set.  (As a fall-back in the case of the empty set we
+    // could just return the ID of the current processor).
+    *id_length = found;
   }
 
   return true;
@@ -557,7 +557,7 @@
 bool os::getenv(const char* name, char* buffer, int len) {
   char* val = ::getenv(name);
   if (val == NULL
-  ||   strlen(val) + 1  >  len ) {
+      ||   strlen(val) + 1  >  len ) {
     if (len > 0)  buffer[0] = 0; // return a null string
     return false;
   }
@@ -932,7 +932,7 @@
 #endif
   OSThread* osthread = create_os_thread(thread, thr_self());
   if (osthread == NULL) {
-     return false;
+    return false;
   }
 
   // Initial thread state is RUNNABLE
@@ -952,9 +952,9 @@
 #endif
   if (_starting_thread == NULL) {
     _starting_thread = create_os_thread(thread, main_thread);
-     if (_starting_thread == NULL) {
-        return false;
-     }
+    if (_starting_thread == NULL) {
+      return false;
+    }
   }
 
   // The primodial thread is runnable from the start
@@ -980,27 +980,27 @@
   if (ThreadPriorityVerbose) {
     char *thrtyp;
     switch (thr_type) {
-      case vm_thread:
-        thrtyp = (char *)"vm";
-        break;
-      case cgc_thread:
-        thrtyp = (char *)"cgc";
-        break;
-      case pgc_thread:
-        thrtyp = (char *)"pgc";
-        break;
-      case java_thread:
-        thrtyp = (char *)"java";
-        break;
-      case compiler_thread:
-        thrtyp = (char *)"compiler";
-        break;
-      case watcher_thread:
-        thrtyp = (char *)"watcher";
-        break;
-      default:
-        thrtyp = (char *)"unknown";
-        break;
+    case vm_thread:
+      thrtyp = (char *)"vm";
+      break;
+    case cgc_thread:
+      thrtyp = (char *)"cgc";
+      break;
+    case pgc_thread:
+      thrtyp = (char *)"pgc";
+      break;
+    case java_thread:
+      thrtyp = (char *)"java";
+      break;
+    case compiler_thread:
+      thrtyp = (char *)"compiler";
+      break;
+    case watcher_thread:
+      thrtyp = (char *)"watcher";
+      break;
+    default:
+      thrtyp = (char *)"unknown";
+      break;
     }
     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
   }
@@ -1104,14 +1104,14 @@
 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
 
 bool os::Solaris::is_sig_ignored(int sig) {
-      struct sigaction oact;
-      sigaction(sig, (struct sigaction*)NULL, &oact);
-      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
-                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
-      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
-           return true;
-      else
-           return false;
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
+                                 : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+    return true;
+  else
+    return false;
 }
 
 // Note: SIGRTMIN is a macro that calls sysconf() so it will
@@ -1158,18 +1158,18 @@
   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
 
   if (!ReduceSignalUsage) {
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-   }
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+    }
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-   }
-   if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+    }
+    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-   }
+    }
   }
   // Fill in signals that are blocked by all but the VM thread.
   sigemptyset(&vm_sigs);
@@ -1245,20 +1245,20 @@
 
     if (stack_size > jt->stack_size()) {
       NOT_PRODUCT(
-        struct rlimit limits;
-        getrlimit(RLIMIT_STACK, &limits);
-        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
-        assert(size >= jt->stack_size(), "Stack size problem in main thread");
-      )
+                  struct rlimit limits;
+                  getrlimit(RLIMIT_STACK, &limits);
+                  size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
+                  assert(size >= jt->stack_size(), "Stack size problem in main thread");
+                  )
       tty->print_cr(
-        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
-        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
-        "See limit(1) to increase the stack size limit.",
-        stack_size / K, jt->stack_size() / K);
+                    "Stack size of %d Kb exceeds current limit of %d Kb.\n"
+                    "(Stack sizes are rounded up to a multiple of the system page size.)\n"
+                    "See limit(1) to increase the stack size limit.",
+                    stack_size / K, jt->stack_size() / K);
       vm_exit(1);
     }
     assert(jt->stack_size() >= stack_size,
-          "Attempt to map more stack than was allocated");
+           "Attempt to map more stack than was allocated");
     jt->set_stack_size(stack_size);
   }
 
@@ -1281,7 +1281,7 @@
   // The main thread must take the VMThread down synchronously
   // before the main thread exits and frees up CodeHeap
   guarantee((Thread::current()->osthread() == osthread
-     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
+             || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
   if (Thread::current()->osthread() == osthread) {
     // Restore caller's signal mask
     sigset_t sigmask = osthread->caller_sigmask();
@@ -1338,19 +1338,19 @@
 }
 
 #define SMALLINT 32   // libthread allocate for tsd_common is a version specific
-                      // small number - point is NO swap space available
+// small number - point is NO swap space available
 void os::thread_local_storage_at_put(int index, void* value) {
   // %%% this is used only in threadLocalStorage.cpp
   if (thr_setspecific((thread_key_t)index, value)) {
     if (errno == ENOMEM) {
-       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
-                             "thr_setspecific: out of swap space");
+      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
+                            "thr_setspecific: out of swap space");
     } else {
       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                     "(%s)", strerror(errno)));
     }
   } else {
-      ThreadLocalStorage::set_thread_in_slot((Thread *) value);
+    ThreadLocalStorage::set_thread_in_slot((Thread *) value);
   }
 }
 
@@ -1402,14 +1402,14 @@
 }
 
 jlong os::elapsed_frequency() {
-   return hrtime_hz;
+  return hrtime_hz;
 }
 
 // Return the real, user, and system times in seconds from an
 // arbitrary fixed point in the past.
 bool os::getTimesSecs(double* process_real_time,
-                  double* process_user_time,
-                  double* process_system_time) {
+                      double* process_user_time,
+                      double* process_system_time) {
   struct tms ticks;
   clock_t real_ticks = times(&ticks);
 
@@ -1759,9 +1759,9 @@
   dlclose(handle);
 }
 
-  // Loads .dll/.so and
-  // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+// Loads .dll/.so and
+// in case of error it checks if .dll/.so was built for the
+// same architecture as Hotspot is running on
 
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
 {
@@ -1795,7 +1795,7 @@
 
   bool failed_to_read_elf_head=
     (sizeof(elf_head)!=
-        (::read(file_descriptor, &elf_head,sizeof(elf_head))));
+     (::read(file_descriptor, &elf_head,sizeof(elf_head))));
 
   ::close(file_descriptor);
   if (failed_to_read_elf_head) {
@@ -1825,21 +1825,21 @@
   };
 
   #if  (defined IA32)
-    static  Elf32_Half running_arch_code=EM_386;
+  static  Elf32_Half running_arch_code=EM_386;
   #elif   (defined AMD64)
-    static  Elf32_Half running_arch_code=EM_X86_64;
+  static  Elf32_Half running_arch_code=EM_X86_64;
   #elif  (defined IA64)
-    static  Elf32_Half running_arch_code=EM_IA_64;
+  static  Elf32_Half running_arch_code=EM_IA_64;
   #elif  (defined __sparc) && (defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARCV9;
+  static  Elf32_Half running_arch_code=EM_SPARCV9;
   #elif  (defined __sparc) && (!defined _LP64)
-    static  Elf32_Half running_arch_code=EM_SPARC;
+  static  Elf32_Half running_arch_code=EM_SPARC;
   #elif  (defined __powerpc64__)
-    static  Elf32_Half running_arch_code=EM_PPC64;
+  static  Elf32_Half running_arch_code=EM_PPC64;
   #elif  (defined __powerpc__)
-    static  Elf32_Half running_arch_code=EM_PPC;
+  static  Elf32_Half running_arch_code=EM_PPC;
   #elif (defined ARM)
-    static  Elf32_Half running_arch_code=EM_ARM;
+  static  Elf32_Half running_arch_code=EM_ARM;
   #else
     #error Method os::dll_load requires that one of following is defined:\
          IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
@@ -1862,7 +1862,7 @@
   }
 
   assert(running_arch_index != -1,
-    "Didn't find running architecture code (running_arch_code) in arch_array");
+         "Didn't find running architecture code (running_arch_code) in arch_array");
   if (running_arch_index == -1) {
     // Even though running architecture detection failed
     // we may still continue with reporting dlerror() message
@@ -1882,13 +1882,13 @@
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
     if (lib_arch.name!=NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-        lib_arch.name, arch_array[running_arch_index].name);
+                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-        lib_arch.code,
-        arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
+                 lib_arch.code,
+                 arch_array[running_arch_index].name);
     }
   }
 
@@ -1916,7 +1916,7 @@
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
-     return false;
+    return false;
   }
 
   char buf[32];
@@ -1954,9 +1954,9 @@
 
 void os::Solaris::print_distro_info(outputStream* st) {
   if (!_print_ascii_file("/etc/release", st)) {
-      st->print("Solaris");
-    }
-    st->cr();
+    st->print("Solaris");
+  }
+  st->cr();
 }
 
 void os::Solaris::print_libversion_info(outputStream* st) {
@@ -2055,7 +2055,7 @@
 }
 
 static void print_signal_handler(outputStream* st, int sig,
-                                  char* buf, size_t buflen) {
+                                 char* buf, size_t buflen) {
   struct sigaction sa;
 
   sigaction(sig, NULL, &sa);
@@ -2089,13 +2089,13 @@
 
   // Check: is it our handler?
   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
-     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
+      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
     // It is our signal handler
     // check for flags
     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
       st->print(
-        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
-        os::Solaris::get_our_sigflags(sig));
+                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
+                os::Solaris::get_our_sigflags(sig));
     }
   }
   st->cr();
@@ -2240,7 +2240,7 @@
     // Ctrl-C is pressed during error reporting, likely because the error
     // handler fails to abort. Let VM die immediately.
     if (sig == SIGINT && is_error_reported()) {
-       os::die();
+      os::die();
     }
 
     os::signal_notify(sig);
@@ -2253,15 +2253,15 @@
 }
 
 class Semaphore : public StackObj {
-  public:
-    Semaphore();
-    ~Semaphore();
-    void signal();
-    void wait();
-    bool trywait();
-    bool timedwait(unsigned int sec, int nsec);
-  private:
-    sema_t _semaphore;
+ public:
+  Semaphore();
+  ~Semaphore();
+  void signal();
+  void wait();
+  bool trywait();
+  bool timedwait(unsigned int sec, int nsec);
+ private:
+  sema_t _semaphore;
 };
 
 
@@ -2360,11 +2360,11 @@
   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
 
   if (UseSignalChaining) {
-     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
-       * (Maxsignum + 1), mtInternal);
-     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
-     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
-     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
+    chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
+                                                   * (Maxsignum + 1), mtInternal);
+    memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
+    preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
+    memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
   }
   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
@@ -2405,7 +2405,7 @@
       thread->set_suspend_equivalent();
       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
-          ;
+        ;
       assert(ret == 0, "sema_wait() failed");
 
       // were we externally suspended while we were waiting?
@@ -2622,37 +2622,37 @@
 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
 // board. An LWP is assigned to one of these groups upon creation.
 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
-     ids[0] = 0;
-     return 1;
-   }
-   int result_size = 0, top = 1, bottom = 0, cur = 0;
-   for (int k = 0; k < size; k++) {
-     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
-                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
-     if (r == -1) {
-       ids[0] = 0;
-       return 1;
-     }
-     if (!r) {
-       // That's a leaf node.
-       assert(bottom <= cur, "Sanity check");
-       // Check if the node has memory
-       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
-                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
-         ids[bottom++] = ids[cur];
-       }
-     }
-     top += r;
-     cur++;
-   }
-   if (bottom == 0) {
-     // Handle a situation, when the OS reports no memory available.
-     // Assume UMA architecture.
-     ids[0] = 0;
-     return 1;
-   }
-   return bottom;
+  if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
+    ids[0] = 0;
+    return 1;
+  }
+  int result_size = 0, top = 1, bottom = 0, cur = 0;
+  for (int k = 0; k < size; k++) {
+    int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
+                                   (Solaris::lgrp_id_t*)&ids[top], size - top);
+    if (r == -1) {
+      ids[0] = 0;
+      return 1;
+    }
+    if (!r) {
+      // That's a leaf node.
+      assert(bottom <= cur, "Sanity check");
+      // Check if the node has memory
+      if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
+                                  NULL, 0, LGRP_RSRC_MEM) > 0) {
+        ids[bottom++] = ids[cur];
+      }
+    }
+    top += r;
+    cur++;
+  }
+  if (bottom == 0) {
+    // Handle a situation, when the OS reports no memory available.
+    // Assume UMA architecture.
+    ids[0] = 0;
+    return 1;
+  }
+  return bottom;
 }
 
 // Detect the topology change. Typically happens during CPU plugging-unplugging.
@@ -2742,9 +2742,9 @@
             break;
           }
         } else
-          if (page_expected->size != 0) {
-            break;
-          }
+        if (page_expected->size != 0) {
+          break;
+        }
 
         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
           if (outdata[types * i] != page_expected->lgrp_id) {
@@ -3132,7 +3132,7 @@
 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
   size_t res;
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
   return res;
 }
@@ -3227,9 +3227,9 @@
 
 // sched class attributes
 typedef struct {
-        int   schedPolicy;              // classID
-        int   maxPrio;
-        int   minPrio;
+  int   schedPolicy;              // classID
+  int   maxPrio;
+  int   minPrio;
 } SchedInfo;
 
 
@@ -3392,7 +3392,7 @@
   int v;
 
   if (x == 127) return rMax;            // avoid round-down
-    v = (((x*(rMax-rMin)))/128)+rMin;
+  v = (((x*(rMax-rMin)))/128)+rMin;
   return v;
 }
 
@@ -3425,8 +3425,8 @@
   if (lwpid <= 0) {
     if (ThreadPriorityVerbose) {
       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
-                     INTPTR_FORMAT " to %d, lwpid not set",
-                     ThreadID, newPrio);
+                    INTPTR_FORMAT " to %d, lwpid not set",
+                    ThreadID, newPrio);
     }
     return 0;
   }
@@ -3459,7 +3459,7 @@
     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(iaLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
+                              ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3474,7 +3474,7 @@
     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(tsLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
+                              ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3488,7 +3488,7 @@
     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
     int maxClamped     = MIN2(fxLimits.maxPrio,
                               cur_class == new_class
-                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
+                              ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                        maxClamped, newPrio)
                                : newPrio;
@@ -3625,16 +3625,16 @@
 
   int lwp_status =
           set_lwp_class_and_priority(osthread->thread_id(),
-          osthread->lwp_id(),
-          newpri,
-          fxcritical ? fxLimits.schedPolicy : myClass,
-          !fxcritical);
+                                     osthread->lwp_id(),
+                                     newpri,
+                                     fxcritical ? fxLimits.schedPolicy : myClass,
+                                     !fxcritical);
   if (lwp_status != 0 && fxcritical) {
     // Try again, this time without changing the scheduling class
     newpri = java_MaxPriority_to_os_priority;
     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
-            osthread->lwp_id(),
-            newpri, myClass, false);
+                                            osthread->lwp_id(),
+                                            newpri, myClass, false);
   }
   status |= lwp_status;
   return (status == 0) ? OS_OK : OS_ERR;
@@ -3830,12 +3830,12 @@
 }
 
 class PcFetcher : public os::SuspendedThreadTask {
-public:
+ public:
   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
   ExtendedPC result();
-protected:
+ protected:
   void do_task(const os::SuspendedThreadTaskContext& context);
-private:
+ private:
   ExtendedPC _epc;
 };
 
@@ -3918,12 +3918,12 @@
    does NOT participate in signal chaining due to requirement for
    NOT setting SA_RESTART to make EINTR work. */
 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
-   if (UseSignalChaining) {
-      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
-      if (actp && actp->sa_handler) {
-        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
-      }
-   }
+  if (UseSignalChaining) {
+    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
+    if (actp && actp->sa_handler) {
+      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
+    }
+  }
 }
 
 // This boolean allows users to forward their own non-matching signals
@@ -4059,8 +4059,8 @@
   // not using stack banging
   if (!UseStackBanging && sig == SIGSEGV) {
     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
-  // Interruptible i/o requires SA_RESTART cleared so EINTR
-  // is returned instead of restarting system calls
+    // Interruptible i/o requires SA_RESTART cleared so EINTR
+    // is returned instead of restarting system calls
   } else if (sig == os::Solaris::SIGinterrupt()) {
     sigemptyset(&sigAct.sa_mask);
     sigAct.sa_handler = NULL;
@@ -4142,34 +4142,34 @@
 
 
   switch (sig) {
-    case SIGSEGV:
-    case SIGBUS:
-    case SIGFPE:
-    case SIGPIPE:
-    case SIGXFSZ:
-    case SIGILL:
+  case SIGSEGV:
+  case SIGBUS:
+  case SIGFPE:
+  case SIGPIPE:
+  case SIGXFSZ:
+  case SIGILL:
+    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
+    break;
+
+  case SHUTDOWN1_SIGNAL:
+  case SHUTDOWN2_SIGNAL:
+  case SHUTDOWN3_SIGNAL:
+  case BREAK_SIGNAL:
+    jvmHandler = (address)user_handler();
+    break;
+
+  default:
+    int intrsig = os::Solaris::SIGinterrupt();
+    int asynsig = os::Solaris::SIGasync();
+
+    if (sig == intrsig) {
+      jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
+    } else if (sig == asynsig) {
       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
-      break;
-
-    case SHUTDOWN1_SIGNAL:
-    case SHUTDOWN2_SIGNAL:
-    case SHUTDOWN3_SIGNAL:
-    case BREAK_SIGNAL:
-      jvmHandler = (address)user_handler();
-      break;
-
-    default:
-      int intrsig = os::Solaris::SIGinterrupt();
-      int asynsig = os::Solaris::SIGasync();
-
-      if (sig == intrsig) {
-        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
-      } else if (sig == asynsig) {
-        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
-      } else {
-        return;
-      }
-      break;
+    } else {
+      return;
+    }
+    break;
   }
 
 
@@ -4240,7 +4240,7 @@
     // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
     // can not register overridable signals which might be > 32
     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
-    // Tell libjsig jvm has finished setting signal handlers
+      // Tell libjsig jvm has finished setting signal handlers
       (*end_signal_setting)();
       libjsigdone = true;
     }
@@ -4293,9 +4293,9 @@
   if (0 < exception_code && exception_code <= SIGRTMAX) {
     // signal
     if (exception_code < sizeof(signames)/sizeof(const char*)) {
-       jio_snprintf(buf, size, "%s", signames[exception_code]);
+      jio_snprintf(buf, size, "%s", signames[exception_code]);
     } else {
-       jio_snprintf(buf, size, "SIG%d", exception_code);
+      jio_snprintf(buf, size, "SIG%d", exception_code);
     }
     return buf;
   } else {
@@ -4436,7 +4436,7 @@
     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
-                                       dlsym(handle, "lgrp_cookie_stale")));
+                                                      dlsym(handle, "lgrp_cookie_stale")));
 
     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
     set_lgrp_cookie(c);
@@ -4587,12 +4587,12 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
+                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
+      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                   os::Solaris::min_stack_allowed/K);
     return JNI_ERR;
@@ -4606,17 +4606,17 @@
   // should be to fix the guard page mechanism.
 
   if (vm_page_size() > 8*K) {
-      threadStackSizeInBytes = (threadStackSizeInBytes != 0)
-         ? threadStackSizeInBytes +
-           ((StackYellowPages + StackRedPages) * vm_page_size())
-         : 0;
-      ThreadStackSize = threadStackSizeInBytes/K;
+    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
+       ? threadStackSizeInBytes +
+         ((StackYellowPages + StackRedPages) * vm_page_size())
+       : 0;
+    ThreadStackSize = threadStackSizeInBytes/K;
   }
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-        vm_page_size()));
+                                                vm_page_size()));
 
   Solaris::libthread_init();
 
@@ -4737,7 +4737,7 @@
     //search  for the named symbol in the objects that were loaded after libjvm
     void* where = RTLD_NEXT;
     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
-        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
+      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
     if (!sol_vsnprintf){
       //search  for the named symbol in the objects that were loaded before libjvm
       where = RTLD_DEFAULT;
@@ -4846,13 +4846,13 @@
      * http://technopark02.blogspot.com/2005_05_01_archive.html
      */
 #ifndef  _LP64
-     if ((!enabled_extended_FILE_stdio) && fd < 256) {
-         int newfd = ::fcntl(fd, F_DUPFD, 256);
-         if (newfd != -1) {
-             ::close(fd);
-             fd = newfd;
-         }
-     }
+  if ((!enabled_extended_FILE_stdio) && fd < 256) {
+    int newfd = ::fcntl(fd, F_DUPFD, 256);
+    if (newfd != -1) {
+      ::close(fd);
+      fd = newfd;
+    }
+  }
 #endif // 32-bit Solaris
     /*
      * All file descriptors that are opened in the JVM and not
@@ -4877,11 +4877,11 @@
      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
      */
 #ifdef FD_CLOEXEC
-    {
-        int flags = ::fcntl(fd, F_GETFD);
-        if (flags != -1)
-            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
-    }
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1)
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  }
 #endif
 
   if (o_delete != 0) {
@@ -4927,7 +4927,7 @@
 
 int os::available(int fd, jlong *bytes) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   jlong cur, end;
   int mode;
   struct stat64 buf64;
@@ -4939,7 +4939,7 @@
 
       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
       if (ioctl_return>= 0) {
-          *bytes = n;
+        *bytes = n;
         return 1;
       }
     }
@@ -4957,8 +4957,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   int prot;
   int flags;
 
@@ -4989,8 +4989,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // same as map_memory() on this OS
   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                         allow_exec);
@@ -5019,7 +5019,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -5208,16 +5208,16 @@
   int fd;
 
   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
-                     getpid(),
-                     thread->osthread()->lwp_id());
+          getpid(),
+          thread->osthread()->lwp_id());
   fd = ::open(proc_name, O_RDONLY);
   if (fd == -1) return -1;
 
   do {
     count = ::pread(fd,
-                  (void *)&prusage.pr_utime,
-                  thr_time_size,
-                  thr_time_off);
+                    (void *)&prusage.pr_utime,
+                    thr_time_size,
+                    thr_time_off);
   } while (count < 0 && errno == EINTR);
   ::close(fd);
   if (count < 0) return -1;
@@ -5413,15 +5413,15 @@
     // leave it alone rather than always rounding down.
 
     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
-       // It appears that when we go directly through Solaris _lwp_cond_timedwait()
-           // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
-           max_wait_period = 21000000;
+    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
+    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
+    max_wait_period = 21000000;
   } else {
     max_wait_period = 50000000;
   }
   millis %= 1000;
   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
-     seconds = max_wait_period;
+    seconds = max_wait_period;
   }
   abstime->tv_sec = now.tv_sec  + seconds;
   long       usec = now.tv_usec + millis * 1000;
@@ -5440,34 +5440,34 @@
 
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v == 0) {
-     // Do this the hard way by blocking ...
-     // See http://monaco.sfbay/detail.jsf?cr=5094058.
-     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
-     // Only for SPARC >= V8PlusA
+    // Do this the hard way by blocking ...
+    // See http://monaco.sfbay/detail.jsf?cr=5094058.
+    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
+    // Only for SPARC >= V8PlusA
 #if defined(__sparc) && defined(COMPILER2)
-     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
+    if (ClearFPUAtPark) { _mark_fpu_nosave(); }
 #endif
-     int status = os::Solaris::mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     guarantee(_nParked == 0, "invariant");
-     ++_nParked;
-     while (_Event < 0) {
-        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
-        // Treat this the same as if the wait was interrupted
-        // With usr/lib/lwp going to kernel, always handle ETIME
-        status = os::Solaris::cond_wait(_cond, _mutex);
-        if (status == ETIME) status = EINTR;
-        assert_status(status == 0 || status == EINTR, status, "cond_wait");
-     }
-     --_nParked;
-     _Event = 0;
-     status = os::Solaris::mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
+    int status = os::Solaris::mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee(_nParked == 0, "invariant");
+    ++_nParked;
+    while (_Event < 0) {
+      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
+      // Treat this the same as if the wait was interrupted
+      // With usr/lib/lwp going to kernel, always handle ETIME
+      status = os::Solaris::cond_wait(_cond, _mutex);
+      if (status == ETIME) status = EINTR;
+      assert_status(status == 0 || status == EINTR, status, "cond_wait");
+    }
+    --_nParked;
+    _Event = 0;
+    status = os::Solaris::mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other.
     OrderAccess::fence();
@@ -5478,8 +5478,8 @@
   guarantee(_nParked == 0, "invariant");
   int v;
   for (;;) {
-      v = _Event;
-      if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
   }
   guarantee(v >= 0, "invariant");
   if (v != 0) return OS_OK;
@@ -5492,20 +5492,20 @@
   // For Solaris SPARC set fprs.FEF=0 prior to parking.
   // Only for SPARC >= V8PlusA
 #if defined(__sparc) && defined(COMPILER2)
- if (ClearFPUAtPark) { _mark_fpu_nosave(); }
+  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
 #endif
   int status = os::Solaris::mutex_lock(_mutex);
   assert_status(status == 0, status, "mutex_lock");
   guarantee(_nParked == 0, "invariant");
   ++_nParked;
   while (_Event < 0) {
-     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
-     assert_status(status == 0 || status == EINTR ||
-                   status == ETIME || status == ETIMEDOUT,
-                   status, "cond_timedwait");
-     if (!FilterSpuriousWakeups) break;                // previous semantics
-     if (status == ETIME || status == ETIMEDOUT) break;
-     // We consume and ignore EINTR and spurious wakeups.
+    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
+    assert_status(status == 0 || status == EINTR ||
+                  status == ETIME || status == ETIMEDOUT,
+                  status, "cond_timedwait");
+    if (!FilterSpuriousWakeups) break;                // previous semantics
+    if (status == ETIME || status == ETIMEDOUT) break;
+    // We consume and ignore EINTR and spurious wakeups.
   }
   --_nParked;
   if (_Event >= 0) ret = OS_OK;
@@ -5786,26 +5786,26 @@
     // Wait for the child process to exit.  This returns immediately if
     // the child has already exited. */
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
-        case ECHILD: return 0;
-        case EINTR: break;
-        default: return -1;
-        }
+      switch (errno) {
+      case ECHILD: return 0;
+      case EINTR: break;
+      default: return -1;
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through
+      return status;
     }
   }
 }
@@ -5819,43 +5819,43 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
-    struct stat statbuf;
-    char buf[MAXPATHLEN];
-    char libmawtpath[MAXPATHLEN];
-    const char *xawtstr  = "/xawt/libmawt.so";
-    const char *new_xawtstr = "/libawt_xawt.so";
-    char *p;
-
-    // Get path to libjvm.so
-    os::jvm_path(buf, sizeof(buf));
-
-    // Get rid of libjvm.so
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // Get rid of client or server
-    p = strrchr(buf, '/');
-    if (p == NULL) return false;
-    else *p = '\0';
-
-    // check xawt/libmawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    // check libawt_xawt.so
-    strcpy(libmawtpath, buf);
-    strcat(libmawtpath, new_xawtstr);
-    if (::stat(libmawtpath, &statbuf) == 0) return false;
-
-    return true;
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *new_xawtstr = "/libawt_xawt.so";
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
 }
 
 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
   size_t res;
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
   return res;
 }
@@ -5870,13 +5870,13 @@
 
 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
 }
 
 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
 }
 
@@ -5899,7 +5899,7 @@
   pfd.events = POLLIN;
 
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
 
   gettimeofday(&t, &aNull);
   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
@@ -5907,14 +5907,14 @@
   for (;;) {
     res = ::poll(&pfd, 1, timeout);
     if (res == OS_ERR && errno == EINTR) {
-        if (timeout != -1) {
-          gettimeofday(&t, &aNull);
-          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
-          timeout -= newtime - prevtime;
-          if (timeout <= 0)
-            return OS_OK;
-          prevtime = newtime;
-        }
+      if (timeout != -1) {
+        gettimeofday(&t, &aNull);
+        newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
+        timeout -= newtime - prevtime;
+        if (timeout <= 0)
+          return OS_OK;
+        prevtime = newtime;
+      }
     } else return res;
   }
 }
@@ -5944,40 +5944,40 @@
   //     EISCONN          The socket is already connected.
   if (_result == OS_ERR && errno == EINTR) {
      /* restarting a connect() changes its errno semantics */
-     RESTARTABLE(::connect(fd, him, len), _result);
+    RESTARTABLE(::connect(fd, him, len), _result);
      /* undo these changes */
-     if (_result == OS_ERR) {
-       if (errno == EALREADY) {
-         errno = EINPROGRESS; /* fall through */
-       } else if (errno == EISCONN) {
-         errno = 0;
-         return OS_OK;
-       }
-     }
-   }
-   return _result;
- }
+    if (_result == OS_ERR) {
+      if (errno == EALREADY) {
+        errno = EINPROGRESS; /* fall through */
+      } else if (errno == EISCONN) {
+        errno = 0;
+        return OS_OK;
+      }
+    }
+  }
+  return _result;
+}
 
 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
   if (fd < 0) {
     return OS_ERR;
   }
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
 }
 
 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                  sockaddr* from, socklen_t* fromlen) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
 }
 
 int os::sendto(int fd, char* buf, size_t len, uint flags,
                struct sockaddr* to, socklen_t tolen) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
+         "Assumed _thread_in_native");
   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
 }
 
@@ -5994,8 +5994,8 @@
 
 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
-          "Assumed _thread_in_native");
-   return ::bind(fd, him, len);
+         "Assumed _thread_in_native");
+  return ::bind(fd, him, len);
 }
 
 // Get the default path to the core file
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -61,8 +61,8 @@
   typedef id_t            lgrp_id_t;
   typedef int             lgrp_rsrc_t;
   typedef enum lgrp_view {
-        LGRP_VIEW_CALLER,       /* what's available to the caller */
-        LGRP_VIEW_OS            /* what's available to operating system */
+    LGRP_VIEW_CALLER,       /* what's available to the caller */
+    LGRP_VIEW_OS            /* what's available to operating system */
   } lgrp_view_t;
 
   typedef uint_t (*getisax_func_t)(uint32_t* array, uint_t n);
@@ -74,8 +74,8 @@
   typedef int (*lgrp_children_func_t)(lgrp_cookie_t  cookie,  lgrp_id_t  parent,
                                       lgrp_id_t *lgrp_array, uint_t lgrp_array_size);
   typedef int (*lgrp_resources_func_t)(lgrp_cookie_t  cookie,  lgrp_id_t  lgrp,
-                                      lgrp_id_t *lgrp_array, uint_t lgrp_array_size,
-                                      lgrp_rsrc_t type);
+                                       lgrp_id_t *lgrp_array, uint_t lgrp_array_size,
+                                       lgrp_rsrc_t type);
   typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie);
   typedef int (*lgrp_cookie_stale_func_t)(lgrp_cookie_t cookie);
   typedef int (*meminfo_func_t)(const uint64_t inaddr[],   int addr_count,
@@ -128,7 +128,7 @@
   static bool valid_stack_address(Thread* thread, address sp);
   static bool valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect);
   static ucontext_t* get_valid_uc_in_signal_handler(Thread* thread,
-    ucontext_t* uc);
+                                                    ucontext_t* uc);
 
   static ExtendedPC  ucontext_get_ExtendedPC(ucontext_t* uc);
   static intptr_t*   ucontext_get_sp(ucontext_t* uc);
@@ -143,7 +143,7 @@
   // os_solaris_i486.hpp and os_solaris_sparc.hpp, but that file
   // provides extensions to the os class and not the Solaris class.
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
-    intptr_t** ret_sp, intptr_t** ret_fp);
+                                              intptr_t** ret_sp, intptr_t** ret_fp);
 
   static void hotspot_sigmask(Thread* thread);
 
@@ -249,7 +249,7 @@
   static int lgrp_fini(lgrp_cookie_t cookie)         { return _lgrp_fini != NULL ? _lgrp_fini(cookie) : -1; }
   static lgrp_id_t lgrp_root(lgrp_cookie_t cookie)   { return _lgrp_root != NULL ? _lgrp_root(cookie) : -1; };
   static int lgrp_children(lgrp_cookie_t  cookie,  lgrp_id_t  parent,
-                    lgrp_id_t *lgrp_array, uint_t lgrp_array_size) {
+                           lgrp_id_t *lgrp_array, uint_t lgrp_array_size) {
     return _lgrp_children != NULL ? _lgrp_children(cookie, parent, lgrp_array, lgrp_array_size) : -1;
   }
   static int lgrp_resources(lgrp_cookie_t  cookie,  lgrp_id_t  lgrp,
@@ -269,8 +269,8 @@
 
   static void set_meminfo(meminfo_func_t func)       { _meminfo = func; }
   static int meminfo (const uint64_t inaddr[],   int addr_count,
-                     const uint_t  info_req[],  int info_count,
-                     uint64_t  outdata[], uint_t validity[]) {
+                      const uint_t  info_req[],  int info_count,
+                      uint64_t  outdata[], uint_t validity[]) {
     return _meminfo != NULL ? _meminfo(inaddr, addr_count, info_req, info_count,
                                        outdata, validity) : -1;
   }
@@ -300,57 +300,57 @@
 };
 
 class PlatformEvent : public CHeapObj<mtInternal> {
-  private:
-    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event;
-    int _nParked;
-    int _pipev[2];
-    mutex_t _mutex[1];
-    cond_t  _cond[1];
-    double PostPad[2];
+ private:
+  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+  volatile int _Event;
+  int _nParked;
+  int _pipev[2];
+  mutex_t _mutex[1];
+  cond_t  _cond[1];
+  double PostPad[2];
 
-  protected:
-    // Defining a protected ctor effectively gives us an abstract base class.
-    // That is, a PlatformEvent can never be instantiated "naked" but only
-    // as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
-    // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee(0, "invariant"); }
-    PlatformEvent() {
-      int status;
-      status = os::Solaris::cond_init(_cond);
-      assert_status(status == 0, status, "cond_init");
-      status = os::Solaris::mutex_init(_mutex);
-      assert_status(status == 0, status, "mutex_init");
-      _Event   = 0;
-      _nParked = 0;
-      _pipev[0] = _pipev[1] = -1;
-    }
+ protected:
+  // Defining a protected ctor effectively gives us an abstract base class.
+  // That is, a PlatformEvent can never be instantiated "naked" but only
+  // as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
+  // TODO-FIXME: make dtor private
+  ~PlatformEvent() { guarantee(0, "invariant"); }
+  PlatformEvent() {
+    int status;
+    status = os::Solaris::cond_init(_cond);
+    assert_status(status == 0, status, "cond_init");
+    status = os::Solaris::mutex_init(_mutex);
+    assert_status(status == 0, status, "mutex_init");
+    _Event   = 0;
+    _nParked = 0;
+    _pipev[0] = _pipev[1] = -1;
+  }
 
-  public:
-    // Exercise caution using reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0; }
-    int  fired() { return _Event; }
-    void park();
-    int  park(jlong millis);
-    void unpark();
+ public:
+  // Exercise caution using reset() and fired() -- they may require MEMBARs
+  void reset() { _Event = 0; }
+  int  fired() { return _Event; }
+  void park();
+  int  park(jlong millis);
+  void unpark();
 };
 
 class PlatformParker : public CHeapObj<mtInternal> {
-  protected:
-    mutex_t _mutex[1];
-    cond_t  _cond[1];
+ protected:
+  mutex_t _mutex[1];
+  cond_t  _cond[1];
 
-  public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee(0, "invariant"); }
+ public:       // TODO-FIXME: make dtor private
+  ~PlatformParker() { guarantee(0, "invariant"); }
 
-  public:
-    PlatformParker() {
-      int status;
-      status = os::Solaris::cond_init(_cond);
-      assert_status(status == 0, status, "cond_init");
-      status = os::Solaris::mutex_init(_mutex);
-      assert_status(status == 0, status, "mutex_init");
-    }
+ public:
+  PlatformParker() {
+    int status;
+    status = os::Solaris::cond_init(_cond);
+    assert_status(status == 0, status, "cond_init");
+    status = os::Solaris::mutex_init(_mutex);
+    assert_status(status == 0, status, "mutex_init");
+  }
 };
 
 #endif // OS_SOLARIS_VM_OS_SOLARIS_HPP
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -126,18 +126,18 @@
 
 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
   switch (reason) {
-    case DLL_PROCESS_ATTACH:
-      vm_lib_handle = hinst;
-      if (ForceTimeHighResolution)
-        timeBeginPeriod(1L);
-      break;
-    case DLL_PROCESS_DETACH:
-      if (ForceTimeHighResolution)
-        timeEndPeriod(1L);
-
-      break;
-    default:
-      break;
+  case DLL_PROCESS_ATTACH:
+    vm_lib_handle = hinst;
+    if (ForceTimeHighResolution)
+      timeBeginPeriod(1L);
+    break;
+  case DLL_PROCESS_DETACH:
+    if (ForceTimeHighResolution)
+      timeEndPeriod(1L);
+
+    break;
+  default:
+    break;
   }
   return true;
 }
@@ -153,8 +153,8 @@
 // Implementation of os
 
 bool os::getenv(const char* name, char* buffer, int len) {
- int result = GetEnvironmentVariable(name, buffer, len);
- return result > 0 && result < len;
+  int result = GetEnvironmentVariable(name, buffer, len);
+  return result > 0 && result < len;
 }
 
 bool os::unsetenv(const char* name) {
@@ -182,41 +182,41 @@
 void os::init_system_properties_values() {
   /* sysclasspath, java_home, dll_dir */
   {
-      char *home_path;
-      char *dll_path;
-      char *pslash;
-      char *bin = "\\bin";
-      char home_dir[MAX_PATH];
-
-      if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
-          os::jvm_path(home_dir, sizeof(home_dir));
-          // Found the full path to jvm.dll.
-          // Now cut the path to <java_home>/jre if we can.
-          *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
-          pslash = strrchr(home_dir, '\\');
-          if (pslash != NULL) {
-              *pslash = '\0';                 /* get rid of \{client|server} */
-              pslash = strrchr(home_dir, '\\');
-              if (pslash != NULL)
-                  *pslash = '\0';             /* get rid of \bin */
-          }
+    char *home_path;
+    char *dll_path;
+    char *pslash;
+    char *bin = "\\bin";
+    char home_dir[MAX_PATH];
+
+    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
+      os::jvm_path(home_dir, sizeof(home_dir));
+      // Found the full path to jvm.dll.
+      // Now cut the path to <java_home>/jre if we can.
+      *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
+      pslash = strrchr(home_dir, '\\');
+      if (pslash != NULL) {
+        *pslash = '\0';                 /* get rid of \{client|server} */
+        pslash = strrchr(home_dir, '\\');
+        if (pslash != NULL)
+          *pslash = '\0';             /* get rid of \bin */
       }
-
-      home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
-      if (home_path == NULL)
-          return;
-      strcpy(home_path, home_dir);
-      Arguments::set_java_home(home_path);
-
-      dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
-      if (dll_path == NULL)
-          return;
-      strcpy(dll_path, home_dir);
-      strcat(dll_path, bin);
-      Arguments::set_dll_dir(dll_path);
-
-      if (!set_boot_path('\\', ';'))
-          return;
+    }
+
+    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
+    if (home_path == NULL)
+      return;
+    strcpy(home_path, home_dir);
+    Arguments::set_java_home(home_path);
+
+    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
+    if (dll_path == NULL)
+      return;
+    strcpy(dll_path, home_dir);
+    strcat(dll_path, bin);
+    Arguments::set_dll_dir(dll_path);
+
+    if (!set_boot_path('\\', ';'))
+      return;
   }
 
   /* library_path */
@@ -239,7 +239,7 @@
     char *path_str = ::getenv("PATH");
 
     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
-        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
+                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 
     library_path[0] = '\0';
 
@@ -261,8 +261,8 @@
     strcat(library_path, tmp);
 
     if (path_str) {
-        strcat(library_path, ";");
-        strcat(library_path, path_str);
+      strcat(library_path, ";");
+      strcat(library_path, path_str);
     }
 
     strcat(library_path, ";.");
@@ -277,7 +277,7 @@
     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
     GetWindowsDirectory(path, MAX_PATH);
     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
-        path, PACKAGE_DIR, EXT_DIR);
+            path, PACKAGE_DIR, EXT_DIR);
     Arguments::set_ext_dirs(buf);
   }
   #undef EXT_DIR
@@ -322,7 +322,7 @@
   toSkip ++;
 #endif
   int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
-    (PVOID*)stack, NULL);
+                                                       (PVOID*)stack, NULL);
   for (int index = captured; index < frames; index ++) {
     stack[index] = NULL;
   }
@@ -445,10 +445,10 @@
   // by VM, so VM can generate error dump when an exception occurred in non-
   // Java thread (e.g. VM thread).
   __try {
-     thread->run();
+    thread->run();
   } __except(topLevelExceptionFilter(
-             (_EXCEPTION_POINTERS*)_exception_info())) {
-      // Nothing to do.
+                                     (_EXCEPTION_POINTERS*)_exception_info())) {
+    // Nothing to do.
   }
 
   // One less thread is executing
@@ -509,7 +509,7 @@
   OSThread* osthread = create_os_thread(thread, thread_h,
                                         (int)current_thread_id());
   if (osthread == NULL) {
-     return false;
+    return false;
   }
 
   // Initial thread state is RUNNABLE
@@ -525,9 +525,9 @@
 #endif
   if (_starting_thread == NULL) {
     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
-     if (_starting_thread == NULL) {
-        return false;
-     }
+    if (_starting_thread == NULL) {
+      return false;
+    }
   }
 
   // The primordial thread is runnable from the start)
@@ -616,12 +616,12 @@
     // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
     // without the flag.
     thread_handle =
-    (HANDLE)_beginthreadex(NULL,
-                           (unsigned)stack_size,
-                           (unsigned (__stdcall *)(void*)) java_start,
-                           thread,
-                           CREATE_SUSPENDED,
-                           &thread_id);
+      (HANDLE)_beginthreadex(NULL,
+                             (unsigned)stack_size,
+                             (unsigned (__stdcall *)(void*)) java_start,
+                             thread,
+                             CREATE_SUSPENDED,
+                             &thread_id);
   }
   if (thread_handle == NULL) {
     // Need to clean up stuff we've allocated so far
@@ -683,8 +683,8 @@
   if (win32::_has_performance_count) {
     return performance_frequency;
   } else {
-   // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
-   return 10000000;
+    // the FILETIME time is the number of 100-nanosecond intervals since January 1, 1601.
+    return 10000000;
   }
 }
 
@@ -916,15 +916,15 @@
 }
 
 bool os::getTimesSecs(double* process_real_time,
-                     double* process_user_time,
-                     double* process_system_time) {
+                      double* process_user_time,
+                      double* process_system_time) {
   HANDLE h_process = GetCurrentProcess();
   FILETIME create_time, exit_time, kernel_time, user_time;
   BOOL result = GetProcessTimes(h_process,
-                               &create_time,
-                               &exit_time,
-                               &kernel_time,
-                               &user_time);
+                                &create_time,
+                                &exit_time,
+                                &kernel_time,
+                                &user_time);
   if (result != 0) {
     FILETIME wt;
     GetSystemTimeAsFileTime(&wt);
@@ -997,9 +997,9 @@
   }
 
   _MiniDumpWriteDump = CAST_TO_FN_PTR(
-    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
-    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
-    GetProcAddress(dbghelp, "MiniDumpWriteDump"));
+                                      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
+                                      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
+                                      GetProcAddress(dbghelp, "MiniDumpWriteDump"));
 
   if (_MiniDumpWriteDump == NULL) {
     VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
@@ -1012,7 +1012,7 @@
 // API_VERSION_NUMBER 11 or higher contains the ones we want though
 #if API_VERSION_NUMBER >= 11
   dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
-    MiniDumpWithUnloadedModules);
+                             MiniDumpWithUnloadedModules);
 #endif
 
   cwd = get_current_directory(NULL, 0);
@@ -1039,21 +1039,21 @@
   // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
-        DWORD error = GetLastError();
-        LPTSTR msgbuf = NULL;
-
-        if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+    DWORD error = GetLastError();
+    LPTSTR msgbuf = NULL;
+
+    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_FROM_SYSTEM |
                       FORMAT_MESSAGE_IGNORE_INSERTS,
                       NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
 
-          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
-          LocalFree(msgbuf);
-        } else {
-          // Call to FormatMessage failed, just include the result from GetLastError
-          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
-        }
-        VMError::report_coredump_status(buffer, false);
+      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
+      LocalFree(msgbuf);
+    } else {
+      // Call to FormatMessage failed, just include the result from GetLastError
+      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
+    }
+    VMError::report_coredump_status(buffer, false);
   } else {
     VMError::report_coredump_status(buffer, true);
   }
@@ -1086,70 +1086,70 @@
 DIR *
 os::opendir(const char *dirname)
 {
-    assert(dirname != NULL, "just checking");   // hotspot change
-    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
-    DWORD fattr;                                // hotspot change
-    char alt_dirname[4] = { 0, 0, 0, 0 };
-
-    if (dirp == 0) {
-        errno = ENOMEM;
-        return 0;
-    }
+  assert(dirname != NULL, "just checking");   // hotspot change
+  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
+  DWORD fattr;                                // hotspot change
+  char alt_dirname[4] = { 0, 0, 0, 0 };
+
+  if (dirp == 0) {
+    errno = ENOMEM;
+    return 0;
+  }
 
     /*
      * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
      * as a directory in FindFirstFile().  We detect this case here and
      * prepend the current drive name.
      */
-    if (dirname[1] == '\0' && dirname[0] == '\\') {
-        alt_dirname[0] = _getdrive() + 'A' - 1;
-        alt_dirname[1] = ':';
-        alt_dirname[2] = '\\';
-        alt_dirname[3] = '\0';
-        dirname = alt_dirname;
-    }
-
-    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
-    if (dirp->path == 0) {
-        free(dirp, mtInternal);
-        errno = ENOMEM;
-        return 0;
-    }
-    strcpy(dirp->path, dirname);
-
-    fattr = GetFileAttributes(dirp->path);
-    if (fattr == 0xffffffff) {
-        free(dirp->path, mtInternal);
-        free(dirp, mtInternal);
-        errno = ENOENT;
-        return 0;
-    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
-        free(dirp->path, mtInternal);
-        free(dirp, mtInternal);
-        errno = ENOTDIR;
-        return 0;
-    }
+  if (dirname[1] == '\0' && dirname[0] == '\\') {
+    alt_dirname[0] = _getdrive() + 'A' - 1;
+    alt_dirname[1] = ':';
+    alt_dirname[2] = '\\';
+    alt_dirname[3] = '\0';
+    dirname = alt_dirname;
+  }
+
+  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
+  if (dirp->path == 0) {
+    free(dirp, mtInternal);
+    errno = ENOMEM;
+    return 0;
+  }
+  strcpy(dirp->path, dirname);
+
+  fattr = GetFileAttributes(dirp->path);
+  if (fattr == 0xffffffff) {
+    free(dirp->path, mtInternal);
+    free(dirp, mtInternal);
+    errno = ENOENT;
+    return 0;
+  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
+    free(dirp->path, mtInternal);
+    free(dirp, mtInternal);
+    errno = ENOTDIR;
+    return 0;
+  }
 
     /* Append "*.*", or possibly "\\*.*", to path */
-    if (dirp->path[1] == ':'
-        && (dirp->path[2] == '\0'
-            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
+  if (dirp->path[1] == ':'
+      && (dirp->path[2] == '\0'
+      || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
         /* No '\\' needed for cases like "Z:" or "Z:\" */
-        strcat(dirp->path, "*.*");
-    } else {
-        strcat(dirp->path, "\\*.*");
+    strcat(dirp->path, "*.*");
+  } else {
+    strcat(dirp->path, "\\*.*");
+  }
+
+  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
+  if (dirp->handle == INVALID_HANDLE_VALUE) {
+    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
+      free(dirp->path, mtInternal);
+      free(dirp, mtInternal);
+      errno = EACCES;
+      return 0;
     }
-
-    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
-    if (dirp->handle == INVALID_HANDLE_VALUE) {
-        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
-            free(dirp->path, mtInternal);
-            free(dirp, mtInternal);
-            errno = EACCES;
-            return 0;
-        }
-    }
-    return dirp;
+  }
+  return dirp;
 }
 
 /* parameter dbuf unused on Windows */
@@ -1157,39 +1157,39 @@
 struct dirent *
 os::readdir(DIR *dirp, dirent *dbuf)
 {
-    assert(dirp != NULL, "just checking");      // hotspot change
-    if (dirp->handle == INVALID_HANDLE_VALUE) {
-        return 0;
+  assert(dirp != NULL, "just checking");      // hotspot change
+  if (dirp->handle == INVALID_HANDLE_VALUE) {
+    return 0;
+  }
+
+  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
+
+  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
+    if (GetLastError() == ERROR_INVALID_HANDLE) {
+      errno = EBADF;
+      return 0;
     }
-
-    strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
-
-    if (!FindNextFile(dirp->handle, &dirp->find_data)) {
-        if (GetLastError() == ERROR_INVALID_HANDLE) {
-            errno = EBADF;
-            return 0;
-        }
-        FindClose(dirp->handle);
-        dirp->handle = INVALID_HANDLE_VALUE;
-    }
-
-    return &dirp->dirent;
+    FindClose(dirp->handle);
+    dirp->handle = INVALID_HANDLE_VALUE;
+  }
+
+  return &dirp->dirent;
 }
 
 int
 os::closedir(DIR *dirp)
 {
-    assert(dirp != NULL, "just checking");      // hotspot change
-    if (dirp->handle != INVALID_HANDLE_VALUE) {
-        if (!FindClose(dirp->handle)) {
-            errno = EBADF;
-            return -1;
-        }
-        dirp->handle = INVALID_HANDLE_VALUE;
+  assert(dirp != NULL, "just checking");      // hotspot change
+  if (dirp->handle != INVALID_HANDLE_VALUE) {
+    if (!FindClose(dirp->handle)) {
+      errno = EBADF;
+      return -1;
     }
-    free(dirp->path, mtInternal);
-    free(dirp, mtInternal);
-    return 0;
+    dirp->handle = INVALID_HANDLE_VALUE;
+  }
+  free(dirp->path, mtInternal);
+  free(dirp, mtInternal);
+  return 0;
 }
 
 // This must be hard coded because it's the system's temporary
@@ -1290,11 +1290,11 @@
   hmod = GetModuleHandle("NTDLL.DLL");
   if (hmod == NULL) return false;
   if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
-                               &minfo, sizeof(MODULEINFO)) )
+                                          &minfo, sizeof(MODULEINFO)) )
     return false;
 
   if ((addr >= minfo.lpBaseOfDll) &&
-       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
+      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
     return true;
   else
     return false;
@@ -1338,9 +1338,9 @@
 
   DWORD size_needed;
   if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
-                           sizeof(modules), &size_needed)) {
-      CloseHandle(hProcess);
-      return 0;
+                                        sizeof(modules), &size_needed)) {
+    CloseHandle(hProcess);
+    return 0;
   }
 
   // number of modules that are currently loaded
@@ -1349,15 +1349,15 @@
   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
     // Get Full pathname:
     if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
-                             filename, sizeof(filename))) {
-        filename[0] = '\0';
+                                           filename, sizeof(filename))) {
+      filename[0] = '\0';
     }
 
     MODULEINFO modinfo;
     if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
-                               &modinfo, sizeof(modinfo))) {
-        modinfo.lpBaseOfDll = NULL;
-        modinfo.SizeOfImage = 0;
+                                            &modinfo, sizeof(modinfo))) {
+      modinfo.lpBaseOfDll = NULL;
+      modinfo.SizeOfImage = 0;
     }
 
     // Invoke callback function
@@ -1385,7 +1385,7 @@
   // Get a handle to a Toolhelp snapshot of the system
   hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid);
   if (hSnapShot == INVALID_HANDLE_VALUE) {
-      return FALSE;
+    return FALSE;
   }
 
   // iterate through all modules
@@ -1416,27 +1416,27 @@
 }
 
 struct _modinfo {
-   address addr;
-   char*   full_path;   // point to a char buffer
-   int     buflen;      // size of the buffer
-   address base_addr;
+  address addr;
+  char*   full_path;   // point to a char buffer
+  int     buflen;      // size of the buffer
+  address base_addr;
 };
 
 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                   unsigned size, void * param) {
-   struct _modinfo *pmod = (struct _modinfo *)param;
-   if (!pmod) return -1;
-
-   if (base_addr     <= pmod->addr &&
-       base_addr+size > pmod->addr) {
-     // if a buffer is provided, copy path name to the buffer
-     if (pmod->full_path) {
-       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
-     }
-     pmod->base_addr = base_addr;
-     return 1;
-   }
-   return 0;
+  struct _modinfo *pmod = (struct _modinfo *)param;
+  if (!pmod) return -1;
+
+  if (base_addr     <= pmod->addr &&
+      base_addr+size > pmod->addr) {
+    // if a buffer is provided, copy path name to the buffer
+    if (pmod->full_path) {
+      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
+    }
+    pmod->base_addr = base_addr;
+    return 1;
+  }
+  return 0;
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
@@ -1480,16 +1480,16 @@
 
 // save the start and end address of jvm.dll into param[0] and param[1]
 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
-                    unsigned size, void * param) {
-   if (!param) return -1;
-
-   if (base_addr     <= (address)_locate_jvm_dll &&
-       base_addr+size > (address)_locate_jvm_dll) {
-         ((address*)param)[0] = base_addr;
-         ((address*)param)[1] = base_addr + size;
-         return 1;
-   }
-   return 0;
+                           unsigned size, void * param) {
+  if (!param) return -1;
+
+  if (base_addr     <= (address)_locate_jvm_dll &&
+      base_addr+size > (address)_locate_jvm_dll) {
+    ((address*)param)[0] = base_addr;
+    ((address*)param)[1] = base_addr + size;
+    return 1;
+  }
+  return 0;
 }
 
 address vm_lib_location[2];    // start and end address of jvm.dll
@@ -1510,13 +1510,13 @@
 // print module info; param is outputStream*
 static int _print_module(int pid, char* fname, address base,
                          unsigned size, void* param) {
-   if (!param) return -1;
-
-   outputStream* st = (outputStream*)param;
-
-   address end_addr = base + size;
-   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
-   return 0;
+  if (!param) return -1;
+
+  outputStream* st = (outputStream*)param;
+
+  address end_addr = base + size;
+  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
+  return 0;
 }
 
 // Loads .dll/.so and
@@ -1556,24 +1556,24 @@
   uint32_t signature_offset;
   uint16_t lib_arch=0;
   bool failed_to_get_lib_arch=
-  (
+    (
     //Go to position 3c in the dll
-    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
-    ||
-    // Read loacation of signature
-    (sizeof(signature_offset)!=
-      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
-    ||
-    //Go to COFF File Header in dll
-    //that is located after"signature" (4 bytes long)
-    (os::seek_to_file_offset(file_descriptor,
-      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
-    ||
-    //Read field that contains code of architecture
-    // that dll was build for
-    (sizeof(lib_arch)!=
-      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
-  );
+     (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
+     ||
+     // Read location of signature
+     (sizeof(signature_offset)!=
+     (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
+     ||
+     //Go to COFF File Header in dll
+     //that is located after "signature" (4 bytes long)
+     (os::seek_to_file_offset(file_descriptor,
+     signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
+     ||
+     //Read field that contains code of architecture
+     // that dll was built for
+     (sizeof(lib_arch)!=
+     (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
+    );
 
   ::close(file_descriptor);
   if (failed_to_get_lib_arch)
@@ -1594,11 +1594,11 @@
     {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
   };
   #if   (defined _M_IA64)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
+  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
   #elif (defined _M_AMD64)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
+  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
   #elif (defined _M_IX86)
-    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
+  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
   #else
     #error Method os::dll_load requires that one of following \
            is defined :_M_IA64,_M_AMD64 or _M_IX86
@@ -1618,7 +1618,7 @@
   }
 
   assert(running_arch_str,
-    "Didn't find runing architecture code in arch_array");
+         "Didn't find runing architecture code in arch_array");
 
   // If the architure is right
   // but some other error took place - report os::lasterror(...) msg
@@ -1630,15 +1630,15 @@
   if (lib_arch_str!=NULL)
   {
     ::_snprintf(ebuf, ebuflen-1,
-      "Can't load %s-bit .dll on a %s-bit platform",
-      lib_arch_str,running_arch_str);
+                "Can't load %s-bit .dll on a %s-bit platform",
+                lib_arch_str,running_arch_str);
   }
   else
   {
     // don't know what architecture this dll was build for
     ::_snprintf(ebuf, ebuflen-1,
-      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
-      lib_arch,running_arch_str);
+                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
+                lib_arch,running_arch_str);
   }
 
   return NULL;
@@ -1646,9 +1646,9 @@
 
 
 void os::print_dll_info(outputStream *st) {
-   int pid = os::current_process_id();
-   st->print_cr("Dynamic libraries:");
-   enumerate_modules(pid, _print_module, (void *)st);
+  int pid = os::current_process_id();
+  st->print_cr("Dynamic libraries:");
+  enumerate_modules(pid, _print_module, (void *)st);
 }
 
 void os::print_os_info_brief(outputStream* st) {
@@ -1785,13 +1785,13 @@
 
   if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
       er->NumberParameters >= 2) {
-      switch (er->ExceptionInformation[0]) {
-      case 0: st->print(", reading address"); break;
-      case 1: st->print(", writing address"); break;
-      default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
-                            er->ExceptionInformation[0]);
-      }
-      st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
+    switch (er->ExceptionInformation[0]) {
+    case 0: st->print(", reading address"); break;
+    case 1: st->print(", writing address"); break;
+    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
+                       er->ExceptionInformation[0]);
+    }
+    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
   } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
              er->NumberParameters >= 2 && UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
@@ -1886,13 +1886,13 @@
   if ((errval = GetLastError()) != 0) {
     // DOS error
     size_t n = (size_t)FormatMessage(
-          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
-          NULL,
-          errval,
-          0,
-          buf,
-          (DWORD)len,
-          NULL);
+                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
+                                     NULL,
+                                     errval,
+                                     0,
+                                     buf,
+                                     (DWORD)len,
+                                     NULL);
     if (n > 3) {
       // Drop final '.', CR, LF
       if (buf[n - 1] == '\n') n--;
@@ -1961,44 +1961,44 @@
 //
 static BOOL WINAPI consoleHandler(DWORD event) {
   switch (event) {
-    case CTRL_C_EVENT:
-      if (is_error_reported()) {
-        // Ctrl-C is pressed during error reporting, likely because the error
-        // handler fails to abort. Let VM die immediately.
-        os::die();
-      }
-
-      os::signal_raise(SIGINT);
-      return TRUE;
-      break;
-    case CTRL_BREAK_EVENT:
-      if (sigbreakHandler != NULL) {
-        (*sigbreakHandler)(SIGBREAK);
-      }
-      return TRUE;
-      break;
-    case CTRL_LOGOFF_EVENT: {
-      // Don't terminate JVM if it is running in a non-interactive session,
-      // such as a service process.
-      USEROBJECTFLAGS flags;
-      HANDLE handle = GetProcessWindowStation();
-      if (handle != NULL &&
-          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
-            sizeof(USEROBJECTFLAGS), NULL)) {
-        // If it is a non-interactive session, let next handler to deal
-        // with it.
-        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
-          return FALSE;
-        }
+  case CTRL_C_EVENT:
+    if (is_error_reported()) {
+      // Ctrl-C is pressed during error reporting, likely because the error
+      // handler fails to abort. Let VM die immediately.
+      os::die();
+    }
+
+    os::signal_raise(SIGINT);
+    return TRUE;
+    break;
+  case CTRL_BREAK_EVENT:
+    if (sigbreakHandler != NULL) {
+      (*sigbreakHandler)(SIGBREAK);
+    }
+    return TRUE;
+    break;
+  case CTRL_LOGOFF_EVENT: {
+    // Don't terminate JVM if it is running in a non-interactive session,
+    // such as a service process.
+    USEROBJECTFLAGS flags;
+    HANDLE handle = GetProcessWindowStation();
+    if (handle != NULL &&
+        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
+        sizeof(USEROBJECTFLAGS), NULL)) {
+      // If it is a non-interactive session, let the next handler deal
+      // with it.
+      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
+        return FALSE;
       }
     }
-    case CTRL_CLOSE_EVENT:
-    case CTRL_SHUTDOWN_EVENT:
-      os::signal_raise(SIGTERM);
-      return TRUE;
-      break;
-    default:
-      break;
+  }
+  case CTRL_CLOSE_EVENT:
+  case CTRL_SHUTDOWN_EVENT:
+    os::signal_raise(SIGTERM);
+    return TRUE;
+    break;
+  default:
+    break;
   }
   return FALSE;
 }
@@ -2221,8 +2221,8 @@
 const char* os::exception_name(int exception_code, char *buf, size_t size) {
   for (int i = 0; exceptlabels[i].name != NULL; i++) {
     if (exceptlabels[i].number == exception_code) {
-       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
-       return buf;
+      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
+      return buf;
     }
   }
 
@@ -2269,21 +2269,21 @@
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 
   switch (exception_code) {
-    case EXCEPTION_FLT_DENORMAL_OPERAND:
-    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
-    case EXCEPTION_FLT_INEXACT_RESULT:
-    case EXCEPTION_FLT_INVALID_OPERATION:
-    case EXCEPTION_FLT_OVERFLOW:
-    case EXCEPTION_FLT_STACK_CHECK:
-    case EXCEPTION_FLT_UNDERFLOW:
-      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
-      if (fp_control_word != ctx->FloatSave.ControlWord) {
-        // Restore FPCW and mask out FLT exceptions
-        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
-        // Mask out pending FLT exceptions
-        ctx->FloatSave.StatusWord &=  0xffffff00;
-        return EXCEPTION_CONTINUE_EXECUTION;
-      }
+  case EXCEPTION_FLT_DENORMAL_OPERAND:
+  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+  case EXCEPTION_FLT_INEXACT_RESULT:
+  case EXCEPTION_FLT_INVALID_OPERATION:
+  case EXCEPTION_FLT_OVERFLOW:
+  case EXCEPTION_FLT_STACK_CHECK:
+  case EXCEPTION_FLT_UNDERFLOW:
+    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
+    if (fp_control_word != ctx->FloatSave.ControlWord) {
+      // Restore FPCW and mask out FLT exceptions
+      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
+      // Mask out pending FLT exceptions
+      ctx->FloatSave.StatusWord &=  0xffffff00;
+      return EXCEPTION_CONTINUE_EXECUTION;
+    }
   }
 
   if (prev_uef_handler != NULL) {
@@ -2296,13 +2296,13 @@
   On Windows, the mxcsr control bits are non-volatile across calls
   See also CR 6192333
   */
-      jint MxCsr = INITIAL_MXCSR;
-        // we can't use StubRoutines::addr_mxcsr_std()
-        // because in Win64 mxcsr is not saved there
-      if (MxCsr != ctx->MxCsr) {
-        ctx->MxCsr = MxCsr;
-        return EXCEPTION_CONTINUE_EXECUTION;
-      }
+  jint MxCsr = INITIAL_MXCSR;
+  // we can't use StubRoutines::addr_mxcsr_std()
+  // because in Win64 mxcsr is not saved there
+  if (MxCsr != ctx->MxCsr) {
+    ctx->MxCsr = MxCsr;
+    return EXCEPTION_CONTINUE_EXECUTION;
+  }
 #endif // !_WIN64
 
   return EXCEPTION_CONTINUE_SEARCH;
@@ -2488,7 +2488,7 @@
           thread->enable_register_stack_red_zone();
 
           return Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
 #endif
         if (thread->stack_yellow_zone_enabled()) {
@@ -2498,8 +2498,8 @@
           thread->disable_stack_yellow_zone();
           // If not in java code, return and hope for the best.
           return in_java ? Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
-            :  EXCEPTION_CONTINUE_EXECUTION;
+                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
+                                            :  EXCEPTION_CONTINUE_EXECUTION;
         } else {
           // Fatal red zone violation.
           thread->disable_stack_red_zone();
@@ -2513,7 +2513,7 @@
         // a one-time-only guard page, which it has released to us.  The next
         // stack overflow on this thread will result in an ACCESS_VIOLATION.
         return Handle_Exception(exceptionInfo,
-          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
       } else {
         // Can only return and hope for the best.  Further stack growth will
         // result in an ACCESS_VIOLATION.
@@ -2528,9 +2528,9 @@
         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
           // Stack overflow.
           assert(!os::uses_stack_guard_pages(),
-            "should be caught by red zone code above.");
+                 "should be caught by red zone code above.");
           return Handle_Exception(exceptionInfo,
-            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
+                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
         //
         // Check for safepoint polling and implicit null
@@ -2552,11 +2552,11 @@
           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
           address addr = (address) exceptionRecord->ExceptionInformation[1];
           if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
-                  addr = (address)((uintptr_t)addr &
-                         (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
-                  os::commit_memory((char *)addr, thread->stack_base() - addr,
-                                    !ExecMem);
-                  return EXCEPTION_CONTINUE_EXECUTION;
+            addr = (address)((uintptr_t)addr &
+                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
+            os::commit_memory((char *)addr, thread->stack_base() - addr,
+                              !ExecMem);
+            return EXCEPTION_CONTINUE_EXECUTION;
           }
           else
 #endif
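
The handler above grows the stack by rounding the faulting address down to a page boundary before committing memory up to the stack base. A minimal standalone sketch of that rounding step, assuming (as the code above does) that the page size is a power of two; this is illustrative only, not HotSpot code:

#include <cstddef>
#include <cstdint>

// Round an address down to the start of the page that contains it.
// Works only when page_size is a power of two, so (page_size - 1) is an
// all-ones mask covering the in-page offset bits.
static char* page_align_down(char* addr, size_t page_size) {
  uintptr_t mask = ~((uintptr_t)page_size - 1);
  return (char*)((uintptr_t)addr & mask);
}

// Example: with 4 KiB pages, 0x7fff1234 aligns down to 0x7fff1000, and the
// region [aligned address, stack base) is what gets re-committed above.
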
@@ -2578,7 +2578,7 @@
                                 *(bundle_start + 1), *bundle_start);
                 }
                 return Handle_Exception(exceptionInfo,
-                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
+                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
               }
             }
 
@@ -2632,7 +2632,7 @@
       // 1. must be first instruction in bundle
       // 2. must be a break instruction with appropriate code
       if ((((uint64_t) pc & 0x0F) == 0) &&
-         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
+          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
         return Handle_Exception(exceptionInfo,
                                 (address)SharedRuntime::get_handle_wrong_method_stub());
       }
@@ -2651,8 +2651,8 @@
       } // switch
     }
     if (((thread->thread_state() == _thread_in_Java) ||
-        (thread->thread_state() == _thread_in_native)) &&
-        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
+         (thread->thread_state() == _thread_in_native)) &&
+         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
     {
       LONG result=Handle_FLT_Exception(exceptionInfo);
       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
@@ -2704,15 +2704,15 @@
 
 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
   switch (type) {
-    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
-    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
-    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
-    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
-    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
-    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
-    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
-    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
-    default:        ShouldNotReachHere();
+  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
+  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
+  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
+  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
+  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
+  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
+  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
+  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
+  default:        ShouldNotReachHere();
   }
   return (address)-1;
 }
@@ -2724,7 +2724,7 @@
   __try {
     (*funcPtr)();
   } __except(topLevelExceptionFilter(
-             (_EXCEPTION_POINTERS*)_exception_info())) {
+                                     (_EXCEPTION_POINTERS*)_exception_info())) {
     // Nothing to do.
   }
 }
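
The wrapper above is the MSVC structured-exception-handling idiom: run the callback under __try and evaluate a filter expression when a hardware exception is raised. A self-contained sketch of the same idiom follows (MSVC-only, with hypothetical names rather than HotSpot's actual filter; GetExceptionInformation() is the documented form of the _exception_info() intrinsic used above):

#include <windows.h>
#include <cstdio>

// The filter expression decides whether to run the __except block
// (EXCEPTION_EXECUTE_HANDLER), resume at the faulting instruction
// (EXCEPTION_CONTINUE_EXECUTION), or keep searching outer handlers
// (EXCEPTION_CONTINUE_SEARCH).
static LONG log_and_handle(EXCEPTION_POINTERS* info) {
  std::printf("caught exception code 0x%lx\n",
              (unsigned long)info->ExceptionRecord->ExceptionCode);
  return EXCEPTION_EXECUTE_HANDLER;
}

static void call_guarded(void (*fn)()) {
  __try {
    fn();
  } __except (log_and_handle(GetExceptionInformation())) {
    // Reached only when the filter returned EXCEPTION_EXECUTE_HANDLER.
  }
}
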
@@ -2763,7 +2763,7 @@
 
 // Container for NUMA node list info
 class NUMANodeListHolder {
-private:
+ private:
   int *_numa_used_node_list;  // allocated below
   int _numa_used_node_count;
 
@@ -2773,7 +2773,7 @@
     }
   }
 
-public:
+ public:
   NUMANodeListHolder() {
     _numa_used_node_count = 0;
     _numa_used_node_list = NULL;
@@ -2821,7 +2821,7 @@
 
 static bool request_lock_memory_privilege() {
   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
-                                os::current_process_id());
+                          os::current_process_id());
 
   LUID luid;
   if (_hProcess != NULL &&
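
For context on what request_lock_memory_privilege() is working toward: enabling SeLockMemoryPrivilege on the process token is the standard prerequisite for large-page allocation on Windows. A condensed sketch of that sequence with the plain Win32 calls (illustrative only; the HotSpot code routes these calls through the Advapi32Dll wrappers touched later in this patch):

#include <windows.h>

// Enable one named privilege -- e.g. "SeLockMemoryPrivilege" -- on the
// current process token.
static bool enable_privilege(const char* name) {
  HANDLE token = NULL;
  if (!OpenProcessToken(GetCurrentProcess(),
                        TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) {
    return false;
  }
  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  bool ok = LookupPrivilegeValueA(NULL, name, &tp.Privileges[0].Luid) &&
            AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL) &&
            GetLastError() == ERROR_SUCCESS;  // success may still be partial
  CloseHandle(token);
  return ok;
}
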
@@ -2981,7 +2981,7 @@
         // need to create a dummy 'reserve' record to match
         // the release.
         MemTracker::record_virtual_memory_reserve((address)p_buf,
-          bytes_to_release, CALLER_PC);
+                                                  bytes_to_release, CALLER_PC);
         os::release_memory(p_buf, bytes_to_release);
       }
 #ifdef ASSERT
@@ -3065,7 +3065,7 @@
 // all or nothing deal.  When we split a reservation, we must break the
 // reservation into two reservations.
 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
-                              bool realloc) {
+                                  bool realloc) {
   if (size > 0) {
     release_memory(base, size);
     if (realloc) {
@@ -3082,7 +3082,7 @@
 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
-      "Alignment must be a multiple of allocation granularity (page size)");
+         "Alignment must be a multiple of allocation granularity (page size)");
   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 
   size_t extra_size = size + alignment;
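
The assertion and the extra_size computation above are two halves of a standard trick: when the OS only hands out allocation-granularity-aligned reservations, reserve size + alignment bytes and carve the aligned block out of the larger window. A small sketch of the address arithmetic, assuming alignment is a power of two (illustrative only):

#include <cstddef>
#include <cstdint>

// Round p up to the next multiple of alignment (alignment must be a power of two).
static char* align_up(char* p, size_t alignment) {
  uintptr_t v = (uintptr_t)p + alignment - 1;
  return (char*)(v & ~((uintptr_t)alignment - 1));
}

// With a raw reservation of (size + alignment) bytes starting at raw_base,
//   char* aligned_base = align_up(raw_base, alignment);
// always satisfies aligned_base + size <= raw_base + size + alignment,
// so the aligned request fits inside the over-sized reservation.
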
@@ -3176,7 +3176,7 @@
   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
     if (TracePageSizes && Verbose) {
-       tty->print_cr("Reserving large pages individually.");
+      tty->print_cr("Reserving large pages individually.");
     }
     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
     if (p_buf == NULL) {
@@ -3195,7 +3195,7 @@
 
   } else {
     if (TracePageSizes && Verbose) {
-       tty->print_cr("Reserving large pages in a single large chunk.");
+      tty->print_cr("Reserving large pages in a single large chunk.");
     }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -3288,7 +3288,7 @@
 }
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
+                          bool exec) {
   // alignment_hint is ignored on this OS
   return pd_commit_memory(addr, size, exec);
 }
@@ -3436,9 +3436,9 @@
   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
   // resolution timers running.
-private:
-    jlong resolution;
-public:
+ private:
+  jlong resolution;
+ public:
   HighResolutionInterval(jlong ms) {
     resolution = ms % 10L;
     if (resolution != 0) {
@@ -3684,7 +3684,7 @@
 julong os::win32::_physical_memory    = 0;
 size_t os::win32::_default_stack_size = 0;
 
-         intx os::win32::_os_thread_limit    = 0;
+intx os::win32::_os_thread_limit    = 0;
 volatile intx os::win32::_os_thread_count    = 0;
 
 bool   os::win32::_is_nt              = false;
@@ -3714,27 +3714,27 @@
   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
   GetVersionEx((OSVERSIONINFO*)&oi);
   switch (oi.dwPlatformId) {
-    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
-    case VER_PLATFORM_WIN32_NT:
-      _is_nt = true;
-      {
-        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
-        if (os_vers == 5002) {
-          _is_windows_2003 = true;
-        }
-        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
+  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
+  case VER_PLATFORM_WIN32_NT:
+    _is_nt = true;
+    {
+      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
+      if (os_vers == 5002) {
+        _is_windows_2003 = true;
+      }
+      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
           oi.wProductType == VER_NT_SERVER) {
-            _is_windows_server = true;
-        }
+        _is_windows_server = true;
       }
-      break;
-    default: fatal("Unknown platform");
+    }
+    break;
+  default: fatal("Unknown platform");
   }
 
   _default_stack_size = os::current_stack_size();
   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
-    "stack size not a multiple of page size");
+         "stack size not a multiple of page size");
 
   initialize_performance_counter();
 
@@ -3760,7 +3760,7 @@
   assert(strchr(name, ':') == NULL, "path not allowed");
   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
     jio_snprintf(ebuf, ebuflen,
-      "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
+                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
     return NULL;
   }
 
@@ -3783,7 +3783,7 @@
   }
 
   jio_snprintf(ebuf, ebuflen,
-    "os::win32::load_windows_dll() cannot load %s from system directories.", name);
+               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
   return NULL;
 }
 
@@ -3872,11 +3872,11 @@
 
   // This may be overridden later when argument processing is done.
   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
-    os::win32::is_windows_2003());
+                os::win32::is_windows_2003());
 
   // Initialize main_process and main_thread
   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
- if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
+  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
     fatal("DuplicateHandle failed\n");
   }
@@ -3959,7 +3959,7 @@
   // class initialization depending on 32 or 64 bit VM.
   size_t min_stack_allowed =
             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
   if (actual_reserve_size < min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, "
                   "Specify at least %dk",
@@ -4120,14 +4120,14 @@
     FILETIME UserTime;
 
     if (GetThreadTimes(thread->osthread()->thread_handle(),
-                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
+                       &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
       return -1;
     else
       if (user_sys_cpu_time) {
         return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
-      } else {
-        return FT2INT64(UserTime) * 100;
-      }
+    } else {
+      return FT2INT64(UserTime) * 100;
+    }
   } else {
     return (jlong) timeGetTime() * 1000000;
   }
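
The * 100 factor above exists because GetThreadTimes() reports times as FILETIME-style counts of 100 ns ticks split into two 32-bit halves, which FT2INT64 presumably stitches back together. A self-contained sketch of the conversion to nanoseconds, using my own helper name rather than HotSpot's:

#include <cstdint>

// Combine the two 32-bit halves of a FILETIME-like value (a count of
// 100 ns ticks) and scale the result to nanoseconds.
static int64_t filetime_to_nanos(uint32_t high, uint32_t low) {
  uint64_t ticks_100ns = ((uint64_t)high << 32) | low;
  return (int64_t)(ticks_100ns * 100);   // 100 ns per tick
}
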
@@ -4156,7 +4156,7 @@
     FILETIME UserTime;
 
     if (GetThreadTimes(GetCurrentThread(),
-                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
+                       &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
       return false;
     else
       return true;
@@ -4204,7 +4204,7 @@
 
   if (strlen(path) > MAX_PATH - 1) {
     errno = ENAMETOOLONG;
-          return -1;
+    return -1;
   }
   os::native_path(strcpy(pathbuf, path));
   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
@@ -4270,9 +4270,9 @@
 
   /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
   assert(((!::IsDBCSLeadByte('/'))
-    && (!::IsDBCSLeadByte('\\'))
-    && (!::IsDBCSLeadByte(':'))),
-    "Illegal lead byte");
+          && (!::IsDBCSLeadByte('\\'))
+          && (!::IsDBCSLeadByte(':'))),
+          "Illegal lead byte");
 
   /* Check for leading separators */
 #define isfilesep(c) ((c) == '/' || (c) == '\\')
@@ -4350,8 +4350,8 @@
 
   /* For "z:", add "." to work around a bug in the C runtime library */
   if (colon == dst - 1) {
-          path[2] = '.';
-          path[3] = '\0';
+    path[2] = '.';
+    path[3] = '\0';
   }
 
   return path;
@@ -4371,7 +4371,7 @@
 
   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
-      return -1;
+    return -1;
   }
 
   if (::SetEndOfFile(h) == FALSE) {
@@ -4390,7 +4390,7 @@
   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
 
   if ((!::FlushFileBuffers(handle)) &&
-         (GetLastError() != ERROR_ACCESS_DENIED) ) {
+      (GetLastError() != ERROR_ACCESS_DENIED) ) {
     /* from winerror.h */
     return -1;
   }
@@ -4484,7 +4484,7 @@
   INPUT_RECORD *lpBuffer;     /* Pointer to records of input events */
 
   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
-        return FALSE;
+    return FALSE;
   }
 
   /* Construct an array of input records in the console buffer */
@@ -4535,8 +4535,8 @@
 
 // Map a block of memory.
 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
   HANDLE hFile;
   char* base;
 
@@ -4655,8 +4655,8 @@
 
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
-                       char *addr, size_t bytes, bool read_only,
-                       bool allow_exec) {
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
   // This OS does not allow existing memory maps to be remapped so we
   // have to unmap the memory before we remap it.
   if (!os::unmap_memory(addr, bytes)) {
@@ -4668,7 +4668,7 @@
   // code may be able to access an address that is no longer mapped.
 
   return os::map_memory(fd, file_name, file_offset, addr, bytes,
-           read_only, allow_exec);
+                        read_only, allow_exec);
 }
 
 
@@ -4704,7 +4704,7 @@
     }
   } else {
     jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+                "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
 
@@ -4722,7 +4722,7 @@
 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
-      "crash_protection already set?");
+         "crash_protection already set?");
 
   bool success = true;
   __try {
@@ -4788,85 +4788,85 @@
 // with explicit "PARKED" and "SIGNALED" bits.
 
 int os::PlatformEvent::park (jlong Millis) {
-    guarantee(_ParkHandle != NULL , "Invariant");
-    guarantee(Millis > 0          , "Invariant");
-    int v;
-
-    // CONSIDER: defer assigning a CreateEvent() handle to the Event until
-    // the initial park() operation.
-
-    for (;;) {
-        v = _Event;
-        if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+  guarantee(_ParkHandle != NULL , "Invariant");
+  guarantee(Millis > 0          , "Invariant");
+  int v;
+
+  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
+  // the initial park() operation.
+
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+  }
+  guarantee((v == 0) || (v == 1), "invariant");
+  if (v != 0) return OS_OK;
+
+  // Do this the hard way by blocking ...
+  // TODO: consider a brief spin here, gated on the success of recent
+  // spin attempts by this thread.
+  //
+  // We decompose long timeouts into series of shorter timed waits.
+  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
+  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
+  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
+  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
+  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
+  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
+  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
+  // for the already waited time.  This policy does not admit any new outcomes.
+  // In the future, however, we might want to track the accumulated wait time and
+  // adjust Millis accordingly if we encounter a spurious wakeup.
+
+  const int MAXTIMEOUT = 0x10000000;
+  DWORD rv = WAIT_TIMEOUT;
+  while (_Event < 0 && Millis > 0) {
+    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
+    if (Millis > MAXTIMEOUT) {
+      prd = MAXTIMEOUT;
     }
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (v != 0) return OS_OK;
-
-    // Do this the hard way by blocking ...
-    // TODO: consider a brief spin here, gated on the success of recent
-    // spin attempts by this thread.
-    //
-    // We decompose long timeouts into series of shorter timed waits.
-    // Evidently large timo values passed in WaitForSingleObject() are problematic on some
-    // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
-    // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
-    // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
-    // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
-    // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
-    // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
-    // for the already waited time.  This policy does not admit any new outcomes.
-    // In the future, however, we might want to track the accumulated wait time and
-    // adjust Millis accordingly if we encounter a spurious wakeup.
-
-    const int MAXTIMEOUT = 0x10000000;
-    DWORD rv = WAIT_TIMEOUT;
-    while (_Event < 0 && Millis > 0) {
-       DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
-       if (Millis > MAXTIMEOUT) {
-          prd = MAXTIMEOUT;
-       }
-       rv = ::WaitForSingleObject(_ParkHandle, prd);
-       assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
-       if (rv == WAIT_TIMEOUT) {
-           Millis -= prd;
-       }
+    rv = ::WaitForSingleObject(_ParkHandle, prd);
+    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
+    if (rv == WAIT_TIMEOUT) {
+      Millis -= prd;
     }
-    v = _Event;
-    _Event = 0;
-    // see comment at end of os::PlatformEvent::park() below:
-    OrderAccess::fence();
-    // If we encounter a nearly simultanous timeout expiry and unpark()
-    // we return OS_OK indicating we awoke via unpark().
-    // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
-    return (v >= 0) ? OS_OK : OS_TIMEOUT;
+  }
+  v = _Event;
+  _Event = 0;
+  // see comment at end of os::PlatformEvent::park() below:
+  OrderAccess::fence();
+  // If we encounter a nearly simultaneous timeout expiry and unpark()
+  // we return OS_OK indicating we awoke via unpark().
+  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
+  return (v >= 0) ? OS_OK : OS_TIMEOUT;
 }
 
 void os::PlatformEvent::park() {
-    guarantee(_ParkHandle != NULL, "Invariant");
-    // Invariant: Only the thread associated with the Event/PlatformEvent
-    // may call park().
-    int v;
-    for (;;) {
-        v = _Event;
-        if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
-    }
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (v != 0) return;
-
-    // Do this the hard way by blocking ...
-    // TODO: consider a brief spin here, gated on the success of recent
-    // spin attempts by this thread.
-    while (_Event < 0) {
-       DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
-       assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
-    }
-
-    // Usually we'll find _Event == 0 at this point, but as
-    // an optional optimization we clear it, just in case can
-    // multiple unpark() operations drove _Event up to 1.
-    _Event = 0;
-    OrderAccess::fence();
-    guarantee(_Event >= 0, "invariant");
+  guarantee(_ParkHandle != NULL, "Invariant");
+  // Invariant: Only the thread associated with the Event/PlatformEvent
+  // may call park().
+  int v;
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+  }
+  guarantee((v == 0) || (v == 1), "invariant");
+  if (v != 0) return;
+
+  // Do this the hard way by blocking ...
+  // TODO: consider a brief spin here, gated on the success of recent
+  // spin attempts by this thread.
+  while (_Event < 0) {
+    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
+    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
+  }
+
+  // Usually we'll find _Event == 0 at this point, but as
+  // an optional optimization we clear it, just in case multiple
+  // unpark() operations drove _Event up to 1.
+  _Event = 0;
+  OrderAccess::fence();
+  guarantee(_Event >= 0, "invariant");
 }
 
 void os::PlatformEvent::unpark() {
@@ -4929,7 +4929,7 @@
 
   // Don't wait if interrupted or already triggered
   if (Thread::is_interrupted(thread, false) ||
-    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
+      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
     ResetEvent(_ParkEvent);
     return;
   }
@@ -5057,13 +5057,13 @@
 
   if (!os::WinSock2Dll::WinSock2Available()) {
     jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
-      ::GetLastError());
+                ::GetLastError());
     return JNI_ERR;
   }
 
   if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
-      ::GetLastError());
+                ::GetLastError());
     return JNI_ERR;
   }
   return JNI_OK;
@@ -5244,7 +5244,7 @@
 BOOL                        os::Kernel32Dll::initialized = FALSE;
 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
   assert(initialized && _GetLargePageMinimum != NULL,
-    "GetLargePageMinimumAvailable() not yet called");
+         "GetLargePageMinimumAvailable() not yet called");
   return _GetLargePageMinimum();
 }
 
@@ -5264,37 +5264,37 @@
 
 LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
   assert(initialized && _VirtualAllocExNuma != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
 }
 
 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
   assert(initialized && _GetNumaHighestNodeNumber != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _GetNumaHighestNodeNumber(ptr_highest_node_number);
 }
 
 BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
   assert(initialized && _GetNumaNodeProcessorMask != NULL,
-    "NUMACallsAvailable() not yet called");
+         "NUMACallsAvailable() not yet called");
 
   return _GetNumaNodeProcessorMask(node, proc_mask);
 }
 
 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
-  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
-    if (!initialized) {
-      initialize();
-    }
-
-    if (_RtlCaptureStackBackTrace != NULL) {
-      return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
-        BackTrace, BackTraceHash);
-    } else {
-      return 0;
-    }
+                                                 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
+  if (!initialized) {
+    initialize();
+  }
+
+  if (_RtlCaptureStackBackTrace != NULL) {
+    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
+                                     BackTrace, BackTraceHash);
+  } else {
+    return 0;
+  }
 }
 
 void os::Kernel32Dll::initializeCommon() {
@@ -5328,7 +5328,7 @@
   return true;
 }
 
-  // Help tools
+// Help tools
 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
   return true;
 }
@@ -5387,15 +5387,15 @@
 
 // Advapi API
 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
-   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
-   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
-     return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
-       BufferLength, PreviousState, ReturnLength);
+                                                   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
+                                                   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
+  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+                                 BufferLength, PreviousState, ReturnLength);
 }
 
 inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
-  PHANDLE TokenHandle) {
-    return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+                                              PHANDLE TokenHandle) {
+  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
 }
 
 inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
@@ -5508,7 +5508,7 @@
 
 BOOL os::Kernel32Dll::SwitchToThread() {
   assert(initialized && _SwitchToThread != NULL,
-    "SwitchToThreadAvailable() not yet called");
+         "SwitchToThreadAvailable() not yet called");
   return _SwitchToThread();
 }
 
@@ -5532,21 +5532,21 @@
 
 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
   assert(initialized && _CreateToolhelp32Snapshot != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
 }
 
 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
   assert(initialized && _Module32First != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _Module32First(hSnapshot, lpme);
 }
 
 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
   assert(initialized && _Module32Next != NULL,
-    "HelpToolsAvailable() not yet called");
+         "HelpToolsAvailable() not yet called");
 
   return _Module32Next(hSnapshot, lpme);
 }
@@ -5561,7 +5561,7 @@
 
 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
   assert(initialized && _GetNativeSystemInfo != NULL,
-    "GetNativeSystemInfoAvailable() not yet called");
+         "GetNativeSystemInfoAvailable() not yet called");
 
   _GetNativeSystemInfo(lpSystemInfo);
 }
@@ -5583,11 +5583,11 @@
     HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
     if (handle != NULL) {
       _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
-        "EnumProcessModules");
+                                                                    "EnumProcessModules");
       _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
-        "GetModuleFileNameExA");
+                                                                      "GetModuleFileNameExA");
       _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
-        "GetModuleInformation");
+                                                                        "GetModuleInformation");
     }
     initialized = TRUE;
   }
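
The initialization code above shows the pattern used throughout this file for optional OS functionality: load the DLL at runtime, look up each entry point by name, and keep NULL function pointers when a symbol is missing so callers can degrade gracefully. A stripped-down sketch with the raw Win32 calls (LoadLibraryA here stands in for HotSpot's load_Windows_dll wrapper):

#include <windows.h>

// Function-pointer type matching EnumProcessModules from PSAPI.DLL.
typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE*, DWORD, LPDWORD);

static EnumProcessModules_Fn resolve_enum_process_modules() {
  HMODULE handle = ::LoadLibraryA("PSAPI.DLL");
  if (handle == NULL) {
    return NULL;                   // DLL not present on this system
  }
  // NULL if this export does not exist in the loaded DLL.
  return (EnumProcessModules_Fn)::GetProcAddress(handle, "EnumProcessModules");
}

Callers then guard every use with an "initialized && fn != NULL" check, exactly as the asserts in the surrounding hunks do.
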
@@ -5597,19 +5597,19 @@
 
 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
   assert(initialized && _EnumProcessModules != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
 }
 
 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
   assert(initialized && _GetModuleFileNameEx != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
 }
 
 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
   assert(initialized && _GetModuleInformation != NULL,
-    "PSApiAvailable() not yet called");
+         "PSApiAvailable() not yet called");
   return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
 }
 
@@ -5645,13 +5645,13 @@
 
 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
   assert(initialized && _WSAStartup != NULL,
-    "WinSock2Available() not yet called");
+         "WinSock2Available() not yet called");
   return _WSAStartup(wVersionRequested, lpWSAData);
 }
 
 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
   assert(initialized && _gethostbyname != NULL,
-    "WinSock2Available() not yet called");
+         "WinSock2Available() not yet called");
   return _gethostbyname(name);
 }
 
@@ -5677,35 +5677,35 @@
     HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
     if (handle != NULL) {
       _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
-        "AdjustTokenPrivileges");
+                                                                          "AdjustTokenPrivileges");
       _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
-        "OpenProcessToken");
+                                                                "OpenProcessToken");
       _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
-        "LookupPrivilegeValueA");
+                                                                        "LookupPrivilegeValueA");
     }
     initialized = TRUE;
   }
 }
 
 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
-   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
-   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
-   assert(initialized && _AdjustTokenPrivileges != NULL,
-     "AdvapiAvailable() not yet called");
-   return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
-       BufferLength, PreviousState, ReturnLength);
+                                            BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
+                                            PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
+  assert(initialized && _AdjustTokenPrivileges != NULL,
+         "AdvapiAvailable() not yet called");
+  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
+                                BufferLength, PreviousState, ReturnLength);
 }
 
 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
-  PHANDLE TokenHandle) {
-   assert(initialized && _OpenProcessToken != NULL,
-     "AdvapiAvailable() not yet called");
-    return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
+                                       PHANDLE TokenHandle) {
+  assert(initialized && _OpenProcessToken != NULL,
+         "AdvapiAvailable() not yet called");
+  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
 }
 
 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
-   assert(initialized && _LookupPrivilegeValue != NULL,
-     "AdvapiAvailable() not yet called");
+  assert(initialized && _LookupPrivilegeValue != NULL,
+         "AdvapiAvailable() not yet called");
   return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
 }
 
@@ -5753,7 +5753,7 @@
   if (result == NULL) {
     if (VerboseInternalVMTests) {
       gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
-        large_allocation_size);
+                          large_allocation_size);
     }
   } else {
     os::release_memory_special(result, large_allocation_size);
@@ -5766,15 +5766,15 @@
     if (actual_location == NULL) {
       if (VerboseInternalVMTests) {
         gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
-          expected_location, large_allocation_size);
+                            expected_location, large_allocation_size);
       }
     } else {
       // release memory
       os::release_memory_special(actual_location, expected_allocation_size);
       // only now check, after releasing any memory to avoid any leaks.
       assert(actual_location == expected_location,
-        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
-          expected_location, expected_allocation_size, actual_location));
+             err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+             expected_location, expected_allocation_size, actual_location));
     }
   }
 
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -74,12 +74,12 @@
   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
   inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
   // See comment above about using jlong atomics on 32-bit platforms
-         static jlong    add    (jlong    add_value, volatile jlong*    dest);
+  static jlong    add    (jlong    add_value, volatile jlong*    dest);
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
   inline static void inc    (volatile jint*     dest);
-         static void inc    (volatile jshort*   dest);
+  static void inc    (volatile jshort*   dest);
   inline static void inc    (volatile size_t*   dest);
   inline static void inc_ptr(volatile intptr_t* dest);
   inline static void inc_ptr(volatile void*     dest);
@@ -87,7 +87,7 @@
   // Atomically decrement a location. dec*() provide:
   // <fence> decrement-dest <membar StoreLoad|StoreStore>
   inline static void dec    (volatile jint*     dest);
-         static void dec    (volatile jshort*    dest);
+  static void dec    (volatile jshort*    dest);
   inline static void dec    (volatile size_t*   dest);
   inline static void dec_ptr(volatile intptr_t* dest);
   inline static void dec_ptr(volatile void*     dest);
@@ -96,7 +96,7 @@
   // prior value of *dest. xchg*() provide:
   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
   inline static jint         xchg(jint         exchange_value, volatile jint*         dest);
-         static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
+  static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
 
   inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
   inline static void*    xchg_ptr(void*    exchange_value, volatile void*   dest);
@@ -105,14 +105,14 @@
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-         static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
+  static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
   inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
   // See comment above about using jlong atomics on 32-bit platforms
   inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
 
-         static unsigned int cmpxchg(unsigned int exchange_value,
-                                     volatile unsigned int* dest,
-                                     unsigned int compare_value);
+  static unsigned int cmpxchg(unsigned int exchange_value,
+                              volatile unsigned int* dest,
+                              unsigned int compare_value);
 
   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
   inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value);
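
Since this hunk only touches declaration indentation, it may help to recall how these primitives are consumed elsewhere in the patch (for example in os::PlatformEvent::park() above): a load, a compare-and-exchange, and a retry on interference. A portable sketch of that loop using std::atomic rather than HotSpot's Atomic class (the return-the-updated-value convention here is my assumption):

#include <atomic>

// Classic CAS retry loop: keep trying until no other thread changed *dest
// between our load and our compare-and-exchange.
static int add_then_fetch(std::atomic<int>* dest, int add_value) {
  int old_value = dest->load();
  while (!dest->compare_exchange_weak(old_value, old_value + add_value)) {
    // old_value now holds the freshly observed contents of *dest; retry.
  }
  return old_value + add_value;    // the value we installed
}
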
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -407,7 +407,7 @@
   // Diagnostic support - periodically unwedge blocked threads
   intx nmt = NativeMonitorTimeout;
   if (nmt > 0 && (nmt < timo || timo <= 0)) {
-     timo = nmt;
+    timo = nmt;
   }
   int err = OS_OK;
   if (0 == timo) {
@@ -590,7 +590,7 @@
     // as a diagnostic measure consider setting w->_ListNext = BAD
     assert(UNS(_OnDeck) == _LBIT, "invariant");
     _OnDeck = w;           // pass OnDeck to w.
-                            // w will clear OnDeck once it acquires the outer lock
+    // w will clear OnDeck once it acquires the outer lock
 
     // Another optional optimization ...
     // For heavily contended locks it's not uncommon that some other
@@ -1082,14 +1082,14 @@
   guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
 
   #ifdef ASSERT
-    Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
-    assert(least != this, "Specification of get_least_... call above");
-    if (least != NULL && least->rank() <= special) {
-      tty->print("Attempting to wait on monitor %s/%d while holding"
-                 " lock %s/%d -- possible deadlock",
-                 name(), rank(), least->name(), least->rank());
-      assert(false, "Shouldn't block(wait) while holding a lock of rank special");
-    }
+  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
+  assert(least != this, "Specification of get_least_... call above");
+  if (least != NULL && least->rank() <= special) {
+    tty->print("Attempting to wait on monitor %s/%d while holding"
+               " lock %s/%d -- possible deadlock",
+               name(), rank(), least->name(), least->rank());
+    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
+  }
   #endif // ASSERT
 
   int wait_status;
@@ -1173,8 +1173,8 @@
 Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
   ClearMonitor((Monitor *) this, name);
 #ifdef ASSERT
- _allow_vm_block   = allow_vm_block;
- _rank             = Rank;
+  _allow_vm_block   = allow_vm_block;
+  _rank             = Rank;
 #endif
 }
 
@@ -1280,38 +1280,38 @@
     // link "this" into the owned locks list
 
     #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
-      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
-                    // Mutex::set_owner_implementation is a friend of Thread
+    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
+    // Mutex::set_owner_implementation is a friend of Thread
 
-      assert(this->rank() >= 0, "bad lock rank");
+    assert(this->rank() >= 0, "bad lock rank");
 
-      // Deadlock avoidance rules require us to acquire Mutexes only in
-      // a global total order. For example m1 is the lowest ranked mutex
-      // that the thread holds and m2 is the mutex the thread is trying
-      // to acquire, then  deadlock avoidance rules require that the rank
-      // of m2 be less  than the rank of m1.
-      // The rank Mutex::native  is an exception in that it is not subject
-      // to the verification rules.
-      // Here are some further notes relating to mutex acquisition anomalies:
-      // . under Solaris, the interrupt lock gets acquired when doing
-      //   profiling, so any lock could be held.
-      // . it is also ok to acquire Safepoint_lock at the very end while we
-      //   already hold Terminator_lock - may happen because of periodic safepoints
-      if (this->rank() != Mutex::native &&
-          this->rank() != Mutex::suspend_resume &&
-          locks != NULL && locks->rank() <= this->rank() &&
-          !SafepointSynchronize::is_at_safepoint() &&
-          this != Interrupt_lock && this != ProfileVM_lock &&
-          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
-            SafepointSynchronize::is_synchronizing())) {
-        new_owner->print_owned_locks();
-        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
-                      "possible deadlock", this->name(), this->rank(),
-                      locks->name(), locks->rank()));
-      }
+    // Deadlock avoidance rules require us to acquire Mutexes only in
+    // a global total order. For example m1 is the lowest ranked mutex
+    // that the thread holds and m2 is the mutex the thread is trying
+    // to acquire, then  deadlock avoidance rules require that the rank
+    // of m2 be less  than the rank of m1.
+    // The rank Mutex::native  is an exception in that it is not subject
+    // to the verification rules.
+    // Here are some further notes relating to mutex acquisition anomalies:
+    // . under Solaris, the interrupt lock gets acquired when doing
+    //   profiling, so any lock could be held.
+    // . it is also ok to acquire Safepoint_lock at the very end while we
+    //   already hold Terminator_lock - may happen because of periodic safepoints
+    if (this->rank() != Mutex::native &&
+        this->rank() != Mutex::suspend_resume &&
+        locks != NULL && locks->rank() <= this->rank() &&
+        !SafepointSynchronize::is_at_safepoint() &&
+        this != Interrupt_lock && this != ProfileVM_lock &&
+        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
+        SafepointSynchronize::is_synchronizing())) {
+      new_owner->print_owned_locks();
+      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
+                    "possible deadlock", this->name(), this->rank(),
+                    locks->name(), locks->rank()));
+    }
 
-      this->_next = new_owner->_owned_locks;
-      new_owner->_owned_locks = this;
+    this->_next = new_owner->_owned_locks;
+    new_owner->_owned_locks = this;
     #endif
 
   } else {
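
The deadlock-avoidance comment in the hunk above boils down to a rank discipline: a thread may only acquire a lock whose rank is lower than the rank of every lock it already holds (with the listed exceptions), so a cycle of waiting threads cannot form. A tiny self-contained sketch of that check, with hypothetical names (it ignores the native/suspend_resume/safepoint exemptions the real code carves out):

#include <cassert>
#include <vector>

struct RankedLock { int rank; };

// Assert the global acquisition order: every lock already held must outrank
// the one being acquired.
static void check_acquire_order(const std::vector<const RankedLock*>& held,
                                const RankedLock* acquiring) {
  for (const RankedLock* l : held) {
    assert(acquiring->rank < l->rank &&
           "acquiring lock out of rank order -- possible deadlock");
  }
}
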
@@ -1326,25 +1326,25 @@
     _owner = NULL; // set the owner
 
     #ifdef ASSERT
-      Monitor *locks = old_owner->owned_locks();
+    Monitor *locks = old_owner->owned_locks();
 
-      // remove "this" from the owned locks list
+    // remove "this" from the owned locks list
 
-      Monitor *prev = NULL;
-      bool found = false;
-      for (; locks != NULL; prev = locks, locks = locks->next()) {
-        if (locks == this) {
-          found = true;
-          break;
-        }
+    Monitor *prev = NULL;
+    bool found = false;
+    for (; locks != NULL; prev = locks, locks = locks->next()) {
+      if (locks == this) {
+        found = true;
+        break;
       }
-      assert(found, "Removing a lock not owned");
-      if (prev == NULL) {
-        old_owner->_owned_locks = _next;
-      } else {
-        prev->_next = _next;
-      }
-      _next = NULL;
+    }
+    assert(found, "Removing a lock not owned");
+    if (prev == NULL) {
+      old_owner->_owned_locks = _next;
+    } else {
+      prev->_next = _next;
+    }
+    _next = NULL;
     #endif
   }
 }
@@ -1360,11 +1360,11 @@
                     name()));
     }
     debug_only(if (rank() != Mutex::special) \
-      thread->check_for_valid_safepoint_state(false);)
+               thread->check_for_valid_safepoint_state(false);)
   }
   if (thread->is_Watcher_thread()) {
     assert(!WatcherThread::watcher_thread()->has_crash_protection(),
-        "locking not allowed when crash protection is set");
+           "locking not allowed when crash protection is set");
   }
 }
 
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -45,7 +45,7 @@
 #include "utilities/preserveException.hpp"
 
 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
-  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+// Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define NOINLINE __attribute__((noinline))
 #else
   #define NOINLINE
@@ -254,11 +254,11 @@
 bool ObjectMonitor::try_enter(Thread* THREAD) {
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;
-       _recursions = 1;
-       OwnerIsThread = 1;
-       return true;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;
+      _recursions = 1;
+      OwnerIsThread = 1;
+      return true;
     }
     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
       return false;
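
The fast path above is the core of monitor entry: one compare-and-swap on the _owner field either claims the lock or tells the caller it is contended, and a thread that already owns the monitor just bumps _recursions. A condensed portable sketch of that shape (hypothetical names; it leaves out the stack-locked case that is_lock_owned() handles and the contended wakeup path):

#include <atomic>

struct ToyMonitor {
  std::atomic<void*> _owner{nullptr};
  int _recursions = 0;             // only the owning thread reads or writes this

  bool try_enter(void* self) {
    if (_owner.load() == self) {   // recursive acquisition by the owner
      _recursions++;
      return true;
    }
    void* expected = nullptr;      // claim an unowned monitor with one CAS
    return _owner.compare_exchange_strong(expected, self);
  }

  void exit(void* self) {
    if (_recursions > 0) {         // unwind a recursive enter first
      _recursions--;
      return;
    }
    _owner.store(nullptr);         // release
  }
};
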
@@ -277,17 +277,17 @@
 
   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   if (cur == NULL) {
-     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
-     assert(_recursions == 0   , "invariant");
-     assert(_owner      == Self, "invariant");
-     // CONSIDER: set or assert OwnerIsThread == 1
-     return;
+    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+    assert(_recursions == 0   , "invariant");
+    assert(_owner      == Self, "invariant");
+    // CONSIDER: set or assert OwnerIsThread == 1
+    return;
   }
 
   if (cur == Self) {
-     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
-     _recursions++;
-     return;
+    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
+    _recursions++;
+    return;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
@@ -310,11 +310,11 @@
   // Note that if we acquire the monitor from an initial spin
   // we forgo posting JVMTI events and firing DTRACE probes.
   if (Knob_SpinEarly && TrySpin (Self) > 0) {
-     assert(_owner == Self      , "invariant");
-     assert(_recursions == 0    , "invariant");
-     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-     Self->_Stalled = 0;
-     return;
+    assert(_owner == Self      , "invariant");
+    assert(_recursions == 0    , "invariant");
+    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+    Self->_Stalled = 0;
+    return;
   }
 
   assert(_owner != Self          , "invariant");
@@ -367,7 +367,7 @@
       // the monitor while suspended because that would surprise the
       // thread that suspended us.
       //
-          _recursions = 0;
+      _recursions = 0;
       _succ = NULL;
       exit(false, Self);
 
@@ -426,7 +426,7 @@
   }
 
   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
-     ObjectMonitor::_sync_ContendedLockAttempts->inc();
+    ObjectMonitor::_sync_ContendedLockAttempts->inc();
   }
 }
 
@@ -452,244 +452,244 @@
 }
 
 void NOINLINE ObjectMonitor::EnterI (TRAPS) {
-    Thread * const Self = THREAD;
-    assert(Self->is_Java_thread(), "invariant");
-    assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "invariant");
+  assert(((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant");
+
+  // Try the lock - TATAS
+  if (TryLock (Self) > 0) {
+    assert(_succ != Self              , "invariant");
+    assert(_owner == Self             , "invariant");
+    assert(_Responsible != Self       , "invariant");
+    return;
+  }
+
+  DeferredInitialize();
 
-    // Try the lock - TATAS
-    if (TryLock (Self) > 0) {
-        assert(_succ != Self              , "invariant");
-        assert(_owner == Self             , "invariant");
-        assert(_Responsible != Self       , "invariant");
-        return;
-    }
+  // We try one round of spinning *before* enqueueing Self.
+  //
+  // If the _owner is ready but OFFPROC we could use a YieldTo()
+  // operation to donate the remainder of this thread's quantum
+  // to the owner.  This has subtle but beneficial affinity
+  // effects.
 
-    DeferredInitialize();
-
-    // We try one round of spinning *before* enqueueing Self.
-    //
-    // If the _owner is ready but OFFPROC we could use a YieldTo()
-    // operation to donate the remainder of this thread's quantum
-    // to the owner.  This has subtle but beneficial affinity
-    // effects.
+  if (TrySpin (Self) > 0) {
+    assert(_owner == Self        , "invariant");
+    assert(_succ != Self         , "invariant");
+    assert(_Responsible != Self  , "invariant");
+    return;
+  }
 
-    if (TrySpin (Self) > 0) {
-        assert(_owner == Self        , "invariant");
-        assert(_succ != Self         , "invariant");
-        assert(_Responsible != Self  , "invariant");
-        return;
-    }
+  // The Spin failed -- Enqueue and park the thread ...
+  assert(_succ  != Self            , "invariant");
+  assert(_owner != Self            , "invariant");
+  assert(_Responsible != Self      , "invariant");
 
-    // The Spin failed -- Enqueue and park the thread ...
-    assert(_succ  != Self            , "invariant");
-    assert(_owner != Self            , "invariant");
-    assert(_Responsible != Self      , "invariant");
+  // Enqueue "Self" on ObjectMonitor's _cxq.
+  //
+  // Node acts as a proxy for Self.
+  // As an aside, if we were ever to rewrite the synchronization code mostly
+  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+  // Java objects.  This would avoid awkward lifecycle and liveness issues,
+  // as well as eliminate a subset of ABA issues.
+  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+  //
 
-    // Enqueue "Self" on ObjectMonitor's _cxq.
-    //
-    // Node acts as a proxy for Self.
-    // As an aside, if were to ever rewrite the synchronization code mostly
-    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
-    // Java objects.  This would avoid awkward lifecycle and liveness issues,
-    // as well as eliminate a subset of ABA issues.
-    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
-    //
+  ObjectWaiter node(Self);
+  Self->_ParkEvent->reset();
+  node._prev   = (ObjectWaiter *) 0xBAD;
+  node.TState  = ObjectWaiter::TS_CXQ;
+
+  // Push "Self" onto the front of the _cxq.
+  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+  // Note that spinning tends to reduce the rate at which threads
+  // enqueue and dequeue on EntryList|cxq.
+  ObjectWaiter * nxt;
+  for (;;) {
+    node._next = nxt = _cxq;
+    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
 
-    ObjectWaiter node(Self);
-    Self->_ParkEvent->reset();
-    node._prev   = (ObjectWaiter *) 0xBAD;
-    node.TState  = ObjectWaiter::TS_CXQ;
-
-    // Push "Self" onto the front of the _cxq.
-    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
-    // Note that spinning tends to reduce the rate at which threads
-    // enqueue and dequeue on EntryList|cxq.
-    ObjectWaiter * nxt;
-    for (;;) {
-        node._next = nxt = _cxq;
-        if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
-
-        // Interference - the CAS failed because _cxq changed.  Just retry.
-        // As an optional optimization we retry the lock.
-        if (TryLock (Self) > 0) {
-            assert(_succ != Self         , "invariant");
-            assert(_owner == Self        , "invariant");
-            assert(_Responsible != Self  , "invariant");
-            return;
-        }
+    // Interference - the CAS failed because _cxq changed.  Just retry.
+    // As an optional optimization we retry the lock.
+    if (TryLock (Self) > 0) {
+      assert(_succ != Self         , "invariant");
+      assert(_owner == Self        , "invariant");
+      assert(_Responsible != Self  , "invariant");
+      return;
     }
+  }
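
The enqueue loop above is a lock-free push onto the front of a singly linked list: point the new node at the current head, then CAS the head pointer; on interference, re-read and retry. A minimal sketch of the same push with std::atomic (illustrative only, omitting the TryLock retry the real loop interleaves):

#include <atomic>

struct Node {
  Node* next = nullptr;
};

// Push node onto the front of a lock-free singly linked list.
static void push_front(std::atomic<Node*>* head, Node* node) {
  Node* old_head = head->load();
  do {
    node->next = old_head;         // link to the currently observed head
  } while (!head->compare_exchange_weak(old_head, node));
}
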
 
-    // Check for cxq|EntryList edge transition to non-null.  This indicates
-    // the onset of contention.  While contention persists exiting threads
-    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
-    // operations revert to the faster 1-0 mode.  This enter operation may interleave
-    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
-    // arrange for one of the contending thread to use a timed park() operations
-    // to detect and recover from the race.  (Stranding is form of progress failure
-    // where the monitor is unlocked but all the contending threads remain parked).
-    // That is, at least one of the contended threads will periodically poll _owner.
-    // One of the contending threads will become the designated "Responsible" thread.
-    // The Responsible thread uses a timed park instead of a normal indefinite park
-    // operation -- it periodically wakes and checks for and recovers from potential
-    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
-    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
-    // be responsible for a monitor.
-    //
-    // Currently, one of the contended threads takes on the added role of "Responsible".
-    // A viable alternative would be to use a dedicated "stranding checker" thread
-    // that periodically iterated over all the threads (or active monitors) and unparked
-    // successors where there was risk of stranding.  This would help eliminate the
-    // timer scalability issues we see on some platforms as we'd only have one thread
-    // -- the checker -- parked on a timer.
+  // Check for cxq|EntryList edge transition to non-null.  This indicates
+  // the onset of contention.  While contention persists exiting threads
+  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+  // operations revert to the faster 1-0 mode.  This enter operation may interleave
+  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+  // arrange for one of the contending threads to use a timed park() operation
+  // to detect and recover from the race.  (Stranding is a form of progress failure
+  // where the monitor is unlocked but all the contending threads remain parked).
+  // That is, at least one of the contended threads will periodically poll _owner.
+  // One of the contending threads will become the designated "Responsible" thread.
+  // The Responsible thread uses a timed park instead of a normal indefinite park
+  // operation -- it periodically wakes and checks for and recovers from potential
+  // strandings admitted by 1-0 exit operations.   We need at most one Responsible
+  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
+  // be responsible for a monitor.
+  //
+  // Currently, one of the contended threads takes on the added role of "Responsible".
+  // A viable alternative would be to use a dedicated "stranding checker" thread
+  // that periodically iterated over all the threads (or active monitors) and unparked
+  // successors where there was risk of stranding.  This would help eliminate the
+  // timer scalability issues we see on some platforms as we'd only have one thread
+  // -- the checker -- parked on a timer.
 
-    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
-        // Try to assume the role of responsible thread for the monitor.
-        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-        Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+    // Try to assume the role of responsible thread for the monitor.
+    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
+    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+  }
+
+  // The lock might have been released while this thread was occupied queueing
+  // itself onto _cxq.  To close the race and avoid "stranding" and
+  // progress-liveness failure we must resample-retry _owner before parking.
+  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+  // In this case the ST-MEMBAR is accomplished with CAS().
+  //
+  // TODO: Defer all thread state transitions until park-time.
+  // Since state transitions are heavy and inefficient we'd like
+  // to defer the state transitions until absolutely necessary,
+  // and in doing so avoid some transitions ...
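The ST cxq; MEMBAR; LD _owner duality above can be sketched in isolation with std::atomic (illustrative only; Node and Mon are stand-ins, and the sequentially consistent CAS supplies the MEMBAR on the enter side):

    #include <atomic>

    struct Node { Node* next; };
    struct Mon  { std::atomic<void*> owner{nullptr}; std::atomic<Node*> cxq{nullptr}; };

    // Entering side: the CAS that prepends Self onto cxq acts as the
    // ST;MEMBAR half of the handshake; the LD of owner must follow it.
    bool enqueue_then_recheck(Mon* m, Node* self) {
      Node* head = m->cxq.load();
      do {
        self->next = head;                                   // ST cxq=nonnull ...
      } while (!m->cxq.compare_exchange_weak(head, self));   // ... MEMBAR via CAS
      return m->owner.load() == nullptr;                     // resample owner before parking
    }

    // Exiting side (1-0): drop the lock, fence, then look for waiters to wake.
    bool drop_then_check_waiters(Mon* m) {
      m->owner.store(nullptr, std::memory_order_release);    // ST owner=NULL
      std::atomic_thread_fence(std::memory_order_seq_cst);   // MEMBAR
      return m->cxq.load() != nullptr;                       // LD cxq
    }
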
+
+  TEVENT(Inflated enter - Contention);
+  int nWakeups = 0;
+  int RecheckInterval = 1;
+
+  for (;;) {
+
+    if (TryLock(Self) > 0) break;
+    assert(_owner != Self, "invariant");
+
+    if ((SyncFlags & 2) && _Responsible == NULL) {
+      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
     }
 
-    // The lock might have been released while this thread was occupied queueing
-    // itself onto _cxq.  To close the race and avoid "stranding" and
-    // progress-liveness failure we must resample-retry _owner before parking.
-    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
-    // In this case the ST-MEMBAR is accomplished with CAS().
-    //
-    // TODO: Defer all thread state transitions until park-time.
-    // Since state transitions are heavy and inefficient we'd like
-    // to defer the state transitions until absolutely necessary,
-    // and in doing so avoid some transitions ...
-
-    TEVENT(Inflated enter - Contention);
-    int nWakeups = 0;
-    int RecheckInterval = 1;
-
-    for (;;) {
-
-        if (TryLock(Self) > 0) break;
-        assert(_owner != Self, "invariant");
-
-        if ((SyncFlags & 2) && _Responsible == NULL) {
-           Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
-        }
-
-        // park self
-        if (_Responsible == Self || (SyncFlags & 1)) {
-            TEVENT(Inflated enter - park TIMED);
-            Self->_ParkEvent->park((jlong) RecheckInterval);
-            // Increase the RecheckInterval, but clamp the value.
-            RecheckInterval *= 8;
-            if (RecheckInterval > 1000) RecheckInterval = 1000;
-        } else {
-            TEVENT(Inflated enter - park UNTIMED);
-            Self->_ParkEvent->park();
-        }
-
-        if (TryLock(Self) > 0) break;
-
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Inflated enter - Futile wakeup);
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-           ObjectMonitor::_sync_FutileWakeups->inc();
-        }
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
-        // We can defer clearing _succ until after the spin completes
-        // TrySpin() must tolerate being called with _succ == Self.
-        // Try yet another round of adaptive spinning.
-        if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
-
-        // We can find that we were unpark()ed and redesignated _succ while
-        // we were spinning.  That's harmless.  If we iterate and call park(),
-        // park() will consume the event and return immediately and we'll
-        // just spin again.  This pattern can repeat, leaving _succ to simply
-        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
-        // Alternately, we can sample fired() here, and if set, forgo spinning
-        // in the next iteration.
-
-        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
-           Self->_ParkEvent->reset();
-           OrderAccess::fence();
-        }
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
-        OrderAccess::fence();
+    // park self
+    if (_Responsible == Self || (SyncFlags & 1)) {
+      TEVENT(Inflated enter - park TIMED);
+      Self->_ParkEvent->park((jlong) RecheckInterval);
+      // Increase the RecheckInterval, but clamp the value.
+      RecheckInterval *= 8;
+      if (RecheckInterval > 1000) RecheckInterval = 1000;
+    } else {
+      TEVENT(Inflated enter - park UNTIMED);
+      Self->_ParkEvent->park();
     }
 
-    // Egress :
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
-    // Normally we'll find Self on the EntryList .
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Inflated enter - Futile wakeup);
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+    ++nWakeups;
 
-    assert(_owner == Self      , "invariant");
-    assert(object() != NULL    , "invariant");
-    // I'd like to write:
-    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-    // but as we're at a safepoint that's not safe.
+    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+    // We can defer clearing _succ until after the spin completes
+    // TrySpin() must tolerate being called with _succ == Self.
+    // Try yet another round of adaptive spinning.
+    if ((Knob_SpinAfterFutile & 1) && TrySpin(Self) > 0) break;
 
-    UnlinkAfterAcquire(Self, &node);
+    // We can find that we were unpark()ed and redesignated _succ while
+    // we were spinning.  That's harmless.  If we iterate and call park(),
+    // park() will consume the event and return immediately and we'll
+    // just spin again.  This pattern can repeat, leaving _succ to simply
+    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
+    // Alternately, we can sample fired() here, and if set, forgo spinning
+    // in the next iteration.
+
+    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+      Self->_ParkEvent->reset();
+      OrderAccess::fence();
+    }
     if (_succ == Self) _succ = NULL;
 
-    assert(_succ != Self, "invariant");
-    if (_Responsible == Self) {
-        _Responsible = NULL;
-        OrderAccess::fence(); // Dekker pivot-point
+    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+    OrderAccess::fence();
+  }
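The futile-wakeup tally in the loop above is deliberately lossy. A minimal sketch of the trade-off, with hypothetical counter names:

    #include <atomic>

    // Lossy counter: a plain, racy increment keeps the probe effect low;
    // occasionally losing an update is acceptable for a statistic.
    static int futile_wakeups_lossy = 0;
    inline void note_futile_wakeup_lossy() { futile_wakeups_lossy = futile_wakeups_lossy + 1; }

    // Exact alternative: precise, but the atomic RMW serializes a hot path --
    // exactly the probe effect the lossy version is avoiding.
    static std::atomic<long> futile_wakeups_exact{0};
    inline void note_futile_wakeup_exact() { futile_wakeups_exact.fetch_add(1, std::memory_order_relaxed); }
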
+
+  // Egress :
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+  // Normally we'll find Self on the EntryList .
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
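Because cxq is prepend-only, the owner can snapshot the head once and then walk the interior with plain loads. A small illustrative sketch (Node and the helper names are hypothetical):

    #include <atomic>

    struct Node { Node* next; };

    // Contending threads only CAS new nodes onto the head of cxq.
    void push_waiter(std::atomic<Node*>* cxq, Node* self) {
      Node* head = cxq->load();
      do { self->next = head; } while (!cxq->compare_exchange_weak(head, self));
    }

    // The owner may therefore load the head once and traverse with plain
    // loads: nodes already linked are never relinked by other threads.
    int count_waiters(std::atomic<Node*>* cxq) {
      int n = 0;
      for (Node* p = cxq->load(); p != nullptr; p = p->next) n++;
      return n;
    }
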
 
-        // We may leave threads on cxq|EntryList without a designated
-        // "Responsible" thread.  This is benign.  When this thread subsequently
-        // exits the monitor it can "see" such preexisting "old" threads --
-        // threads that arrived on the cxq|EntryList before the fence, above --
-        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
-        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
-        // non-null and elect a new "Responsible" timer thread.
-        //
-        // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
-        //    LD cxq|EntryList               (in subsequent exit)
-        //
-        // Entering threads in the slow/contended path execute:
-        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
-        //    The (ST cxq; MEMBAR) is accomplished with CAS().
-        //
-        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
-        // exit operation from floating above the ST Responsible=null.
-    }
+  assert(_owner == Self      , "invariant");
+  assert(object() != NULL    , "invariant");
+  // I'd like to write:
+  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+  // but as we're at a safepoint that's not safe.
+
+  UnlinkAfterAcquire(Self, &node);
+  if (_succ == Self) _succ = NULL;
+
+  assert(_succ != Self, "invariant");
+  if (_Responsible == Self) {
+    _Responsible = NULL;
+    OrderAccess::fence(); // Dekker pivot-point
 
-    // We've acquired ownership with CAS().
-    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
-    // But since the CAS() this thread may have also stored into _succ,
-    // EntryList, cxq or Responsible.  These meta-data updates must be
-    // visible __before this thread subsequently drops the lock.
-    // Consider what could occur if we didn't enforce this constraint --
-    // STs to monitor meta-data and user-data could reorder with (become
-    // visible after) the ST in exit that drops ownership of the lock.
-    // Some other thread could then acquire the lock, but observe inconsistent
-    // or old monitor meta-data and heap data.  That violates the JMM.
-    // To that end, the 1-0 exit() operation must have at least STST|LDST
-    // "release" barrier semantics.  Specifically, there must be at least a
-    // STST|LDST barrier in exit() before the ST of null into _owner that drops
-    // the lock.   The barrier ensures that changes to monitor meta-data and data
-    // protected by the lock will be visible before we release the lock, and
-    // therefore before some other thread (CPU) has a chance to acquire the lock.
-    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+    // We may leave threads on cxq|EntryList without a designated
+    // "Responsible" thread.  This is benign.  When this thread subsequently
+    // exits the monitor it can "see" such preexisting "old" threads --
+    // threads that arrived on the cxq|EntryList before the fence, above --
+    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
+    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+    // non-null and elect a new "Responsible" timer thread.
+    //
+    // This thread executes:
+    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
+    //    LD cxq|EntryList               (in subsequent exit)
+    //
+    // Entering threads in the slow/contended path execute:
+    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+    //    The (ST cxq; MEMBAR) is accomplished with CAS().
     //
-    // Critically, any prior STs to _succ or EntryList must be visible before
-    // the ST of null into _owner in the *subsequent* (following) corresponding
-    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
-    // execute a serializing instruction.
+    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+    // exit operation from floating above the ST Responsible=null.
+  }
 
-    if (SyncFlags & 8) {
-       OrderAccess::fence();
-    }
-    return;
+  // We've acquired ownership with CAS().
+  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+  // But since the CAS() this thread may have also stored into _succ,
+  // EntryList, cxq or Responsible.  These meta-data updates must be
+  // visible __before this thread subsequently drops the lock.
+  // Consider what could occur if we didn't enforce this constraint --
+  // STs to monitor meta-data and user-data could reorder with (become
+  // visible after) the ST in exit that drops ownership of the lock.
+  // Some other thread could then acquire the lock, but observe inconsistent
+  // or old monitor meta-data and heap data.  That violates the JMM.
+  // To that end, the 1-0 exit() operation must have at least STST|LDST
+  // "release" barrier semantics.  Specifically, there must be at least a
+  // STST|LDST barrier in exit() before the ST of null into _owner that drops
+  // the lock.   The barrier ensures that changes to monitor meta-data and data
+  // protected by the lock will be visible before we release the lock, and
+  // therefore before some other thread (CPU) has a chance to acquire the lock.
+  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+  //
+  // Critically, any prior STs to _succ or EntryList must be visible before
+  // the ST of null into _owner in the *subsequent* (following) corresponding
+  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
+  // execute a serializing instruction.
+
+  if (SyncFlags & 8) {
+    OrderAccess::fence();
+  }
+  return;
 }
 
 // ReenterI() is a specialized inline form of the latter half of the
@@ -701,91 +701,91 @@
 // loop accordingly.
 
 void NOINLINE ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
-    assert(Self != NULL                , "invariant");
-    assert(SelfNode != NULL            , "invariant");
-    assert(SelfNode->_thread == Self   , "invariant");
-    assert(_waiters > 0                , "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant");
-    assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
-    JavaThread * jt = (JavaThread *) Self;
+  assert(Self != NULL                , "invariant");
+  assert(SelfNode != NULL            , "invariant");
+  assert(SelfNode->_thread == Self   , "invariant");
+  assert(_waiters > 0                , "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant");
+  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
+  JavaThread * jt = (JavaThread *) Self;
 
-    int nWakeups = 0;
-    for (;;) {
-        ObjectWaiter::TStates v = SelfNode->TState;
-        guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-        assert(_owner != Self, "invariant");
-
-        if (TryLock(Self) > 0) break;
-        if (TrySpin(Self) > 0) break;
+  int nWakeups = 0;
+  for (;;) {
+    ObjectWaiter::TStates v = SelfNode->TState;
+    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+    assert(_owner != Self, "invariant");
 
-        TEVENT(Wait Reentry - parking);
+    if (TryLock(Self) > 0) break;
+    if (TrySpin(Self) > 0) break;
 
-        // State transition wrappers around park() ...
-        // ReenterI() wisely defers state transitions until
-        // it's clear we must park the thread.
-        {
-           OSThreadContendState osts(Self->osthread());
-           ThreadBlockInVM tbivm(jt);
+    TEVENT(Wait Reentry - parking);
 
-           // cleared by handle_special_suspend_equivalent_condition()
-           // or java_suspend_self()
-           jt->set_suspend_equivalent();
-           if (SyncFlags & 1) {
-              Self->_ParkEvent->park((jlong)1000);
-           } else {
-              Self->_ParkEvent->park();
-           }
-
-           // were we externally suspended while we were waiting?
-           for (;;) {
-              if (!ExitSuspendEquivalent(jt)) break;
-              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
-              jt->java_suspend_self();
-              jt->set_suspend_equivalent();
-           }
-        }
+    // State transition wrappers around park() ...
+    // ReenterI() wisely defers state transitions until
+    // it's clear we must park the thread.
+    {
+      OSThreadContendState osts(Self->osthread());
+      ThreadBlockInVM tbivm(jt);
 
-        // Try again, but just so we distinguish between futile wakeups and
-        // successful wakeups.  The following test isn't algorithmically
-        // necessary, but it helps us maintain sensible statistics.
-        if (TryLock(Self) > 0) break;
+      // cleared by handle_special_suspend_equivalent_condition()
+      // or java_suspend_self()
+      jt->set_suspend_equivalent();
+      if (SyncFlags & 1) {
+        Self->_ParkEvent->park((jlong)1000);
+      } else {
+        Self->_ParkEvent->park();
+      }
 
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT(Wait Reentry - futile wakeup);
-        ++nWakeups;
-
-        // Assuming this is not a spurious wakeup we'll normally
-        // find that _succ == Self.
-        if (_succ == Self) _succ = NULL;
-
-        // Invariant: after clearing _succ a contending thread
-        // *must* retry  _owner before parking.
-        OrderAccess::fence();
-
-        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-          ObjectMonitor::_sync_FutileWakeups->inc();
-        }
+      // were we externally suspended while we were waiting?
+      for (;;) {
+        if (!ExitSuspendEquivalent(jt)) break;
+        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+        jt->java_suspend_self();
+        jt->set_suspend_equivalent();
+      }
     }
 
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
-    // Normally we'll find Self on the EntryList.
-    // Unlinking from the EntryList is constant-time and atomic-free.
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
+    // Try again, but just so we distinguish between futile wakeups and
+    // successful wakeups.  The following test isn't algorithmically
+    // necessary, but it helps us maintain sensible statistics.
+    if (TryLock(Self) > 0) break;
+
+    // The lock is still contested.
+    // Keep a tally of the # of futile wakeups.
+    // Note that the counter is not protected by a lock or updated by atomics.
+    // That is by design - we trade "lossy" counters which are exposed to
+    // races during updates for a lower probe effect.
+    TEVENT(Wait Reentry - futile wakeup);
+    ++nWakeups;
+
+    // Assuming this is not a spurious wakeup we'll normally
+    // find that _succ == Self.
+    if (_succ == Self) _succ = NULL;
+
+    // Invariant: after clearing _succ a contending thread
+    // *must* retry  _owner before parking.
+    OrderAccess::fence();
 
-    assert(_owner == Self, "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
-    UnlinkAfterAcquire(Self, SelfNode);
-    if (_succ == Self) _succ = NULL;
-    assert(_succ != Self, "invariant");
-    SelfNode->TState = ObjectWaiter::TS_RUN;
-    OrderAccess::fence();      // see comments at the end of EnterI()
+    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+      ObjectMonitor::_sync_FutileWakeups->inc();
+    }
+  }
+
+  // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
+  // Normally we'll find Self on the EntryList.
+  // Unlinking from the EntryList is constant-time and atomic-free.
+  // From the perspective of the lock owner (this thread), the
+  // EntryList is stable and cxq is prepend-only.
+  // The head of cxq is volatile but the interior is stable.
+  // In addition, Self.TState is stable.
+
+  assert(_owner == Self, "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  UnlinkAfterAcquire(Self, SelfNode);
+  if (_succ == Self) _succ = NULL;
+  assert(_succ != Self, "invariant");
+  SelfNode->TState = ObjectWaiter::TS_RUN;
+  OrderAccess::fence();      // see comments at the end of EnterI()
 }
 
 // By convention we unlink a contending thread from EntryList|cxq immediately
@@ -794,66 +794,66 @@
 
 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
 {
-    assert(_owner == Self, "invariant");
-    assert(SelfNode->_thread == Self, "invariant");
+  assert(_owner == Self, "invariant");
+  assert(SelfNode->_thread == Self, "invariant");
 
-    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
-        // Normal case: remove Self from the DLL EntryList .
-        // This is a constant-time operation.
-        ObjectWaiter * nxt = SelfNode->_next;
-        ObjectWaiter * prv = SelfNode->_prev;
-        if (nxt != NULL) nxt->_prev = prv;
-        if (prv != NULL) prv->_next = nxt;
-        if (SelfNode == _EntryList) _EntryList = nxt;
-        assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
-        TEVENT(Unlink from EntryList);
-    } else {
-        assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
-        // Inopportune interleaving -- Self is still on the cxq.
-        // This usually means the enqueue of self raced an exiting thread.
-        // Normally we'll find Self near the front of the cxq, so
-        // dequeueing is typically fast.  If needbe we can accelerate
-        // this with some MCS/CHL-like bidirectional list hints and advisory
-        // back-links so dequeueing from the interior will normally operate
-        // in constant-time.
-        // Dequeue Self from either the head (with CAS) or from the interior
-        // with a linear-time scan and normal non-atomic memory operations.
-        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
-        // and then unlink Self from EntryList.  We have to drain eventually,
-        // so it might as well be now.
+  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+    // Normal case: remove Self from the DLL EntryList .
+    // This is a constant-time operation.
+    ObjectWaiter * nxt = SelfNode->_next;
+    ObjectWaiter * prv = SelfNode->_prev;
+    if (nxt != NULL) nxt->_prev = prv;
+    if (prv != NULL) prv->_next = nxt;
+    if (SelfNode == _EntryList) _EntryList = nxt;
+    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
+    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
+    TEVENT(Unlink from EntryList);
+  } else {
+    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
+    // Inopportune interleaving -- Self is still on the cxq.
+    // This usually means the enqueue of self raced an exiting thread.
+    // Normally we'll find Self near the front of the cxq, so
+    // dequeueing is typically fast.  If need be we can accelerate
+    // this with some MCS/CHL-like bidirectional list hints and advisory
+    // back-links so dequeueing from the interior will normally operate
+    // in constant-time.
+    // Dequeue Self from either the head (with CAS) or from the interior
+    // with a linear-time scan and normal non-atomic memory operations.
+    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+    // and then unlink Self from EntryList.  We have to drain eventually,
+    // so it might as well be now.
 
-        ObjectWaiter * v = _cxq;
-        assert(v != NULL, "invariant");
-        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
-            // The CAS above can fail from interference IFF a "RAT" arrived.
-            // In that case Self must be in the interior and can no longer be
-            // at the head of cxq.
-            if (v == SelfNode) {
-                assert(_cxq != v, "invariant");
-                v = _cxq;          // CAS above failed - start scan at head of list
-            }
-            ObjectWaiter * p;
-            ObjectWaiter * q = NULL;
-            for (p = v; p != NULL && p != SelfNode; p = p->_next) {
-                q = p;
-                assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
-            }
-            assert(v != SelfNode, "invariant");
-            assert(p == SelfNode, "Node not found on cxq");
-            assert(p != _cxq, "invariant");
-            assert(q != NULL, "invariant");
-            assert(q->_next == p, "invariant");
-            q->_next = p->_next;
-        }
-        TEVENT(Unlink from cxq);
+    ObjectWaiter * v = _cxq;
+    assert(v != NULL, "invariant");
+    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+      // The CAS above can fail from interference IFF a "RAT" arrived.
+      // In that case Self must be in the interior and can no longer be
+      // at the head of cxq.
+      if (v == SelfNode) {
+        assert(_cxq != v, "invariant");
+        v = _cxq;          // CAS above failed - start scan at head of list
+      }
+      ObjectWaiter * p;
+      ObjectWaiter * q = NULL;
+      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
+        q = p;
+        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
+      }
+      assert(v != SelfNode, "invariant");
+      assert(p == SelfNode, "Node not found on cxq");
+      assert(p != _cxq, "invariant");
+      assert(q != NULL, "invariant");
+      assert(q->_next == p, "invariant");
+      q->_next = p->_next;
     }
+    TEVENT(Unlink from cxq);
+  }
 
 #ifdef ASSERT
-    // Diagnostic hygiene ...
-    SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
-    SelfNode->_next  = (ObjectWaiter *) 0xBAD;
-    SelfNode->TState = ObjectWaiter::TS_RUN;
+  // Diagnostic hygiene ...
+  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
+  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
+  SelfNode->TState = ObjectWaiter::TS_RUN;
 #endif
 }
 
@@ -915,331 +915,331 @@
 // of such futile wakeups is low.
 
 void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
-   Thread * const Self = THREAD;
-   if (THREAD != _owner) {
-     if (THREAD->is_lock_owned((address) _owner)) {
-       // Transmute _owner from a BasicLock pointer to a Thread address.
-       // We don't need to hold _mutex for this transition.
-       // Non-null to Non-null is safe as long as all readers can
-       // tolerate either flavor.
-       assert(_recursions == 0, "invariant");
-       _owner = THREAD;
-       _recursions = 0;
-       OwnerIsThread = 1;
-     } else {
-       // Apparent unbalanced locking ...
-       // Naively we'd like to throw IllegalMonitorStateException.
-       // As a practical matter we can neither allocate nor throw an
-       // exception as ::exit() can be called from leaf routines.
-       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
-       // Upon deeper reflection, however, in a properly run JVM the only
-       // way we should encounter this situation is in the presence of
-       // unbalanced JNI locking. TODO: CheckJNICalls.
-       // See also: CR4414101
-       TEVENT(Exit - Throw IMSX);
-       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
-       return;
-     }
-   }
+  Thread * const Self = THREAD;
+  if (THREAD != _owner) {
+    if (THREAD->is_lock_owned((address) _owner)) {
+      // Transmute _owner from a BasicLock pointer to a Thread address.
+      // We don't need to hold _mutex for this transition.
+      // Non-null to Non-null is safe as long as all readers can
+      // tolerate either flavor.
+      assert(_recursions == 0, "invariant");
+      _owner = THREAD;
+      _recursions = 0;
+      OwnerIsThread = 1;
+    } else {
+      // Apparent unbalanced locking ...
+      // Naively we'd like to throw IllegalMonitorStateException.
+      // As a practical matter we can neither allocate nor throw an
+      // exception as ::exit() can be called from leaf routines.
+      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+      // Upon deeper reflection, however, in a properly run JVM the only
+      // way we should encounter this situation is in the presence of
+      // unbalanced JNI locking. TODO: CheckJNICalls.
+      // See also: CR4414101
+      TEVENT(Exit - Throw IMSX);
+      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
+      return;
+    }
+  }
 
-   if (_recursions != 0) {
-     _recursions--;        // this is simple recursive enter
-     TEVENT(Inflated exit - recursive);
-     return;
-   }
+  if (_recursions != 0) {
+    _recursions--;        // this is simple recursive enter
+    TEVENT(Inflated exit - recursive);
+    return;
+  }
 
-   // Invariant: after setting Responsible=null an thread must execute
-   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
+  // Invariant: after setting Responsible=null a thread must execute
+  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
 
 #if INCLUDE_TRACE
-   // get the owner's thread id for the MonitorEnter event
-   // if it is enabled and the thread isn't suspended
-   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
-     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
-   }
+  // get the owner's thread id for the MonitorEnter event
+  // if it is enabled and the thread isn't suspended
+  if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
+    _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+  }
 #endif
 
-   for (;;) {
-      assert(THREAD == _owner, "invariant");
+  for (;;) {
+    assert(THREAD == _owner, "invariant");
 
 
-      if (Knob_ExitPolicy == 0) {
-         // release semantics: prior loads and stores from within the critical section
-         // must not float (reorder) past the following store that drops the lock.
-         // On SPARC that requires MEMBAR #loadstore|#storestore.
-         // But of course in TSO #loadstore|#storestore is not required.
-         // I'd like to write one of the following:
-         // A.  OrderAccess::release() ; _owner = NULL
-         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
-         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
-         // store into a _dummy variable.  That store is not needed, but can result
-         // in massive wasteful coherency traffic on classic SMP systems.
-         // Instead, I use release_store(), which is implemented as just a simple
-         // ST on x64, x86 and SPARC.
-         OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-         OrderAccess::storeload();                         // See if we need to wake a successor
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            TEVENT(Inflated exit - simple egress);
-            return;
-         }
-         TEVENT(Inflated exit - complex egress);
-         // Other threads are blocked trying to acquire the lock.
+    if (Knob_ExitPolicy == 0) {
+      // release semantics: prior loads and stores from within the critical section
+      // must not float (reorder) past the following store that drops the lock.
+      // On SPARC that requires MEMBAR #loadstore|#storestore.
+      // But of course in TSO #loadstore|#storestore is not required.
+      // I'd like to write one of the following:
+      // A.  OrderAccess::release() ; _owner = NULL
+      // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+      // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+      // store into a _dummy variable.  That store is not needed, but can result
+      // in massive wasteful coherency traffic on classic SMP systems.
+      // Instead, I use release_store(), which is implemented as just a simple
+      // ST on x64, x86 and SPARC.
+      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+      OrderAccess::storeload();                         // See if we need to wake a successor
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        TEVENT(Inflated exit - simple egress);
+        return;
+      }
+      TEVENT(Inflated exit - complex egress);
+      // Other threads are blocked trying to acquire the lock.
+
+      // Normally the exiting thread is responsible for ensuring succession,
+      // but if other successors are ready or other entering threads are spinning
+      // then this thread can simply store NULL into _owner and exit without
+      // waking a successor.  The existence of spinners or ready successors
+      // guarantees proper succession (liveness).  Responsibility passes to the
+      // ready or running successors.  The exiting thread delegates the duty.
+      // More precisely, if a successor already exists this thread is absolved
+      // of the responsibility of waking (unparking) one.
+      //
+      // The _succ variable is critical to reducing futile wakeup frequency.
+      // _succ identifies the "heir presumptive" thread that has been made
+      // ready (unparked) but that has not yet run.  We need only one such
+      // successor thread to guarantee progress.
+      // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+      // section 3.3 "Futile Wakeup Throttling" for details.
+      //
+      // Note that spinners in Enter() also set _succ non-null.
+      // In the current implementation spinners opportunistically set
+      // _succ so that exiting threads might avoid waking a successor.
+      // Another less appealing alternative would be for the exiting thread
+      // to drop the lock and then spin briefly to see if a spinner managed
+      // to acquire the lock.  If so, the exiting thread could exit
+      // immediately without waking a successor, otherwise the exiting
+      // thread would need to dequeue and wake a successor.
+      // (Note that we'd need to make the post-drop spin short, but no
+      // shorter than the worst-case round-trip cache-line migration time.
+      // The dropped lock needs to become visible to the spinner, and then
+      // the acquisition of the lock by the spinner must become visible to
+      // the exiting thread).
+      //
 
-         // Normally the exiting thread is responsible for ensuring succession,
-         // but if other successors are ready or other entering threads are spinning
-         // then this thread can simply store NULL into _owner and exit without
-         // waking a successor.  The existence of spinners or ready successors
-         // guarantees proper succession (liveness).  Responsibility passes to the
-         // ready or running successors.  The exiting thread delegates the duty.
-         // More precisely, if a successor already exists this thread is absolved
-         // of the responsibility of waking (unparking) one.
-         //
-         // The _succ variable is critical to reducing futile wakeup frequency.
-         // _succ identifies the "heir presumptive" thread that has been made
-         // ready (unparked) but that has not yet run.  We need only one such
-         // successor thread to guarantee progress.
-         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
-         // section 3.3 "Futile Wakeup Throttling" for details.
-         //
-         // Note that spinners in Enter() also set _succ non-null.
-         // In the current implementation spinners opportunistically set
-         // _succ so that exiting threads might avoid waking a successor.
-         // Another less appealing alternative would be for the exiting thread
-         // to drop the lock and then spin briefly to see if a spinner managed
-         // to acquire the lock.  If so, the exiting thread could exit
-         // immediately without waking a successor, otherwise the exiting
-         // thread would need to dequeue and wake a successor.
-         // (Note that we'd need to make the post-drop spin short, but no
-         // shorter than the worst-case round-trip cache-line migration time.
-         // The dropped lock needs to become visible to the spinner, and then
-         // the acquisition of the lock by the spinner must become visible to
-         // the exiting thread).
-         //
+      // It appears that an heir-presumptive (successor) must be made ready.
+      // Only the current lock owner can manipulate the EntryList or
+      // drain _cxq, so we need to reacquire the lock.  If we fail
+      // to reacquire the lock the responsibility for ensuring succession
+      // falls to the new owner.
+      //
+      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+        return;
+      }
+      TEVENT(Exit - Reacquired);
+    } else {
+      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+        OrderAccess::storeload();
+        // Ratify the previously observed values.
+        if (_cxq == NULL || _succ != NULL) {
+          TEVENT(Inflated exit - simple egress);
+          return;
+        }
 
-         // It appears that an heir-presumptive (successor) must be made ready.
-         // Only the current lock owner can manipulate the EntryList or
-         // drain _cxq, so we need to reacquire the lock.  If we fail
-         // to reacquire the lock the responsibility for ensuring succession
-         // falls to the new owner.
-         //
-         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-            return;
-         }
-         TEVENT(Exit - Reacquired);
+        // inopportune interleaving -- the exiting thread (this thread)
+        // in the fast-exit path raced an entering thread in the slow-enter
+        // path.
+        // We have two choices:
+        // A.  Try to reacquire the lock.
+        //     If the CAS() fails return immediately, otherwise
+        //     we either restart/rerun the exit operation, or simply
+        //     fall-through into the code below which wakes a successor.
+        // B.  If the elements forming the EntryList|cxq are TSM
+        //     we could simply unpark() the lead thread and return
+        //     without having set _succ.
+        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+          TEVENT(Inflated exit - reacquired succeeded);
+          return;
+        }
+        TEVENT(Inflated exit - reacquired failed);
       } else {
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
-            OrderAccess::storeload();
-            // Ratify the previously observed values.
-            if (_cxq == NULL || _succ != NULL) {
-                TEVENT(Inflated exit - simple egress);
-                return;
-            }
+        TEVENT(Inflated exit - complex egress);
+      }
+    }
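Condensing the exit policies above into a standalone sketch (simplified; std::atomic stands in for OrderAccess/Atomic, and successor selection is elided):

    #include <atomic>

    struct Mon {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> entry_list{nullptr};
      std::atomic<void*> cxq{nullptr};
      std::atomic<void*> succ{nullptr};     // heir presumptive, if any
    };

    // 1-0 exit: the common case is a release store plus a fence, with no CAS.
    void exit_1_0(Mon* m, void* self) {
      m->owner.store(nullptr, std::memory_order_release);    // drop the lock
      std::atomic_thread_fence(std::memory_order_seq_cst);   // then look for waiters
      bool no_waiters = m->entry_list.load() == nullptr && m->cxq.load() == nullptr;
      if (no_waiters || m->succ.load() != nullptr) {
        return;   // a spinner or ready successor guarantees progress
      }
      // Otherwise try to reacquire; if the CAS fails the new owner inherits
      // the duty of waking a successor.
      void* expected = nullptr;
      if (!m->owner.compare_exchange_strong(expected, self)) {
        return;
      }
      // ... choose a waiter from EntryList|cxq and unpark it (elided) ...
    }
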
+
+    guarantee(_owner == THREAD, "invariant");
+
+    ObjectWaiter * w = NULL;
+    int QMode = Knob_QMode;
 
-            // inopportune interleaving -- the exiting thread (this thread)
-            // in the fast-exit path raced an entering thread in the slow-enter
-            // path.
-            // We have two choices:
-            // A.  Try to reacquire the lock.
-            //     If the CAS() fails return immediately, otherwise
-            //     we either restart/rerun the exit operation, or simply
-            //     fall-through into the code below which wakes a successor.
-            // B.  If the elements forming the EntryList|cxq are TSM
-            //     we could simply unpark() the lead thread and return
-            //     without having set _succ.
-            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-               TEVENT(Inflated exit - reacquired succeeded);
-               return;
-            }
-            TEVENT(Inflated exit - reacquired failed);
-         } else {
-            TEVENT(Inflated exit - complex egress);
-         }
+    if (QMode == 2 && _cxq != NULL) {
+      // QMode == 2 : cxq has precedence over EntryList.
+      // Try to directly wake a successor from the cxq.
+      // If successful, the successor will need to unlink itself from cxq.
+      w = _cxq;
+      assert(w != NULL, "invariant");
+      assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    if (QMode == 3 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
+      // Drain _cxq into EntryList - bulk transfer.
+      // First, detach _cxq.
+      // The following loop is tantamount to: w = swap (&cxq, NULL)
+      w = _cxq;
+      for (;;) {
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
+      }
+      assert(w != NULL              , "invariant");
+
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      guarantee(_owner == THREAD, "invariant");
-
-      ObjectWaiter * w = NULL;
-      int QMode = Knob_QMode;
-
-      if (QMode == 2 && _cxq != NULL) {
-          // QMode == 2 : cxq has precedence over EntryList.
-          // Try to directly wake a successor from the cxq.
-          // If successful, the successor will need to unlink itself from cxq.
-          w = _cxq;
-          assert(w != NULL, "invariant");
-          assert(w->TState == ObjectWaiter::TS_CXQ, "Invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      if (QMode == 3 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
-
-          // Append the RATs to the EntryList
-          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
-          ObjectWaiter * Tail;
-          for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL; Tail = Tail->_next);
-          if (Tail == NULL) {
-              _EntryList = w;
-          } else {
-              Tail->_next = w;
-              w->_prev = Tail;
-          }
-
-          // Fall thru into code that tries to wake a successor from EntryList
+      // Append the RATs to the EntryList
+      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+      ObjectWaiter * Tail;
+      for (Tail = _EntryList; Tail != NULL && Tail->_next != NULL; Tail = Tail->_next);
+      if (Tail == NULL) {
+        _EntryList = w;
+      } else {
+        Tail->_next = w;
+        w->_prev = Tail;
       }
 
-      if (QMode == 4 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensure that recently-run threads live at the head of EntryList.
-
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq;
-          for (;;) {
-             assert(w != NULL, "Invariant");
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-             if (u == w) break;
-             w = u;
-          }
-          assert(w != NULL              , "invariant");
-
-          ObjectWaiter * q = NULL;
-          ObjectWaiter * p;
-          for (p = w; p != NULL; p = p->_next) {
-              guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-              p->TState = ObjectWaiter::TS_ENTER;
-              p->_prev = q;
-              q = p;
-          }
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-          // Prepend the RATs to the EntryList
-          if (_EntryList != NULL) {
-              q->_next = _EntryList;
-              _EntryList->_prev = q;
-          }
-          _EntryList = w;
-
-          // Fall thru into code that tries to wake a successor from EntryList
-      }
-
-      w = _EntryList;
-      if (w != NULL) {
-          // I'd like to write: guarantee (w->_thread != Self).
-          // But in practice an exiting thread may find itself on the EntryList.
-          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
-          // then calls exit().  Exit release the lock by setting O._owner to NULL.
-          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
-          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
-          // release the lock "O".  T2 resumes immediately after the ST of null into
-          // _owner, above.  T2 notices that the EntryList is populated, so it
-          // reacquires the lock and then finds itself on the EntryList.
-          // Given all that, we have to tolerate the circumstance where "w" is
-          // associated with Self.
-          assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
-      }
-
-      // If we find that both _cxq and EntryList are null then just
-      // re-run the exit protocol from the top.
-      w = _cxq;
-      if (w == NULL) continue;
+    if (QMode == 4 && _cxq != NULL) {
+      // Aggressively drain cxq into EntryList at the first opportunity.
+      // This policy ensures that recently-run threads live at the head of EntryList.
 
       // Drain _cxq into EntryList - bulk transfer.
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap (&cxq, NULL)
+      w = _cxq;
       for (;;) {
-          assert(w != NULL, "Invariant");
-          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
-          if (u == w) break;
-          w = u;
+        assert(w != NULL, "Invariant");
+        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        if (u == w) break;
+        w = u;
       }
-      TEVENT(Inflated exit - drain cxq into EntryList);
-
       assert(w != NULL              , "invariant");
-      assert(_EntryList  == NULL    , "invariant");
-
-      // Convert the LIFO SLL anchored by _cxq into a DLL.
-      // The list reorganization step operates in O(LENGTH(w)) time.
-      // It's critical that this step operate quickly as
-      // "Self" still holds the outer-lock, restricting parallelism
-      // and effectively lengthening the critical section.
-      // Invariant: s chases t chases u.
-      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
-      // we have faster access to the tail.
 
-      if (QMode == 1) {
-         // QMode == 1 : drain cxq to EntryList, reversing order
-         // We also reverse the order of the list.
-         ObjectWaiter * s = NULL;
-         ObjectWaiter * t = w;
-         ObjectWaiter * u = NULL;
-         while (t != NULL) {
-             guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
-             t->TState = ObjectWaiter::TS_ENTER;
-             u = t->_next;
-             t->_prev = u;
-             t->_next = s;
-             s = t;
-             t = u;
-         }
-         _EntryList  = s;
-         assert(s != NULL, "invariant");
-      } else {
-         // QMode == 0 or QMode == 2
-         _EntryList = w;
-         ObjectWaiter * q = NULL;
-         ObjectWaiter * p;
-         for (p = w; p != NULL; p = p->_next) {
-             guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
-             p->TState = ObjectWaiter::TS_ENTER;
-             p->_prev = q;
-             q = p;
-         }
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
       }
 
-      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
-      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+      // Prepend the RATs to the EntryList
+      if (_EntryList != NULL) {
+        q->_next = _EntryList;
+        _EntryList->_prev = q;
+      }
+      _EntryList = w;
+
+      // Fall thru into code that tries to wake a successor from EntryList
+    }
 
-      // See if we can abdicate to a spinner instead of waking a thread.
-      // A primary goal of the implementation is to reduce the
-      // context-switch rate.
-      if (_succ != NULL) continue;
+    w = _EntryList;
+    if (w != NULL) {
+      // I'd like to write: guarantee (w->_thread != Self).
+      // But in practice an exiting thread may find itself on the EntryList.
+      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
+      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
+      // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
+      // releases the lock "O".  T2 resumes immediately after the ST of null into
+      // _owner, above.  T2 notices that the EntryList is populated, so it
+      // reacquires the lock and then finds itself on the EntryList.
+      // Given all that, we have to tolerate the circumstance where "w" is
+      // associated with Self.
+      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+
+    // If we find that both _cxq and EntryList are null then just
+    // re-run the exit protocol from the top.
+    w = _cxq;
+    if (w == NULL) continue;
+
+    // Drain _cxq into EntryList - bulk transfer.
+    // First, detach _cxq.
+    // The following loop is tantamount to: w = swap (&cxq, NULL)
+    for (;;) {
+      assert(w != NULL, "Invariant");
+      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+      if (u == w) break;
+      w = u;
+    }
+    TEVENT(Inflated exit - drain cxq into EntryList);
+
+    assert(w != NULL              , "invariant");
+    assert(_EntryList  == NULL    , "invariant");
 
-      w = _EntryList;
-      if (w != NULL) {
-          guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
-          ExitEpilog(Self, w);
-          return;
+    // Convert the LIFO SLL anchored by _cxq into a DLL.
+    // The list reorganization step operates in O(LENGTH(w)) time.
+    // It's critical that this step operate quickly as
+    // "Self" still holds the outer-lock, restricting parallelism
+    // and effectively lengthening the critical section.
+    // Invariant: s chases t chases u.
+    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+    // we have faster access to the tail.
+
+    if (QMode == 1) {
+      // QMode == 1 : drain cxq to EntryList, reversing order
+      // We also reverse the order of the list.
+      ObjectWaiter * s = NULL;
+      ObjectWaiter * t = w;
+      ObjectWaiter * u = NULL;
+      while (t != NULL) {
+        guarantee(t->TState == ObjectWaiter::TS_CXQ, "invariant");
+        t->TState = ObjectWaiter::TS_ENTER;
+        u = t->_next;
+        t->_prev = u;
+        t->_next = s;
+        s = t;
+        t = u;
       }
-   }
+      _EntryList  = s;
+      assert(s != NULL, "invariant");
+    } else {
+      // QMode == 0 or QMode == 2
+      _EntryList = w;
+      ObjectWaiter * q = NULL;
+      ObjectWaiter * p;
+      for (p = w; p != NULL; p = p->_next) {
+        guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
+        p->TState = ObjectWaiter::TS_ENTER;
+        p->_prev = q;
+        q = p;
+      }
+    }
+
+    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+    // See if we can abdicate to a spinner instead of waking a thread.
+    // A primary goal of the implementation is to reduce the
+    // context-switch rate.
+    if (_succ != NULL) continue;
+
+    w = _EntryList;
+    if (w != NULL) {
+      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
+      ExitEpilog(Self, w);
+      return;
+    }
+  }
 }
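The drain step above repeats one pattern: atomically detach the whole cxq chain, then rebuild it as a doubly-linked EntryList while holding the lock. A standalone sketch of that pattern (Waiter and the helper names are illustrative):

    #include <atomic>

    struct Waiter { Waiter* next{nullptr}; Waiter* prev{nullptr}; };

    // Atomically detach the whole chain: equivalent to w = swap(&cxq, NULL).
    Waiter* detach_cxq(std::atomic<Waiter*>* cxq) {
      return cxq->exchange(nullptr);
    }

    // Convert the detached singly-linked chain into a doubly-linked list in
    // O(length) time; only the lock owner runs this, so plain stores suffice.
    Waiter* link_as_dll(Waiter* head) {
      Waiter* prev = nullptr;
      for (Waiter* p = head; p != nullptr; p = p->next) {
        p->prev = prev;
        prev = p;
      }
      return head;
    }
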
 
 // ExitSuspendEquivalent:
@@ -1278,52 +1278,52 @@
 
 
 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-   const int Mode = Knob_FastHSSEC;
-   if (Mode && !jSelf->is_external_suspend()) {
-      assert(jSelf->is_suspend_equivalent(), "invariant");
-      jSelf->clear_suspend_equivalent();
-      if (2 == Mode) OrderAccess::storeload();
-      if (!jSelf->is_external_suspend()) return false;
-      // We raced a suspension -- fall thru into the slow path
-      TEVENT(ExitSuspendEquivalent - raced);
-      jSelf->set_suspend_equivalent();
-   }
-   return jSelf->handle_special_suspend_equivalent_condition();
+  const int Mode = Knob_FastHSSEC;
+  if (Mode && !jSelf->is_external_suspend()) {
+    assert(jSelf->is_suspend_equivalent(), "invariant");
+    jSelf->clear_suspend_equivalent();
+    if (2 == Mode) OrderAccess::storeload();
+    if (!jSelf->is_external_suspend()) return false;
+    // We raced a suspension -- fall thru into the slow path
+    TEVENT(ExitSuspendEquivalent - raced);
+    jSelf->set_suspend_equivalent();
+  }
+  return jSelf->handle_special_suspend_equivalent_condition();
 }
 
 
 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
-   assert(_owner == Self, "invariant");
+  assert(_owner == Self, "invariant");
 
-   // Exit protocol:
-   // 1. ST _succ = wakee
-   // 2. membar #loadstore|#storestore;
-   // 2. ST _owner = NULL
-   // 3. unpark(wakee)
+  // Exit protocol:
+  // 1. ST _succ = wakee
+  // 2. membar #loadstore|#storestore;
+  // 3. ST _owner = NULL
+  // 4. unpark(wakee)
 
-   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
-   ParkEvent * Trigger = Wakee->_event;
+  _succ = Knob_SuccEnabled ? Wakee->_thread : NULL;
+  ParkEvent * Trigger = Wakee->_event;
 
-   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
-   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
-   // out-of-scope (non-extant).
-   Wakee  = NULL;
+  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+  // out-of-scope (non-extant).
+  Wakee  = NULL;
 
-   // Drop the lock
-   OrderAccess::release_store_ptr(&_owner, NULL);
-   OrderAccess::fence();                               // ST _owner vs LD in unpark()
+  // Drop the lock
+  OrderAccess::release_store_ptr(&_owner, NULL);
+  OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
-   if (SafepointSynchronize::do_call_back()) {
-      TEVENT(unpark before SAFEPOINT);
-   }
+  if (SafepointSynchronize::do_call_back()) {
+    TEVENT(unpark before SAFEPOINT);
+  }
 
-   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
-   Trigger->unpark();
+  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+  Trigger->unpark();
 
-   // Maintain stats and report events to JVMTI
-   if (ObjectMonitor::_sync_Parks != NULL) {
-      ObjectMonitor::_sync_Parks->inc();
-   }
+  // Maintain stats and report events to JVMTI
+  if (ObjectMonitor::_sync_Parks != NULL) {
+    ObjectMonitor::_sync_Parks->inc();
+  }
 }
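A condensed sketch of the handoff hygiene in ExitEpilog (illustrative stand-ins only): copy what you need out of the wakee's node before dropping the lock, because the wakee may run and recycle that node the moment _owner goes NULL.

    #include <atomic>

    struct Event  { void unpark() { /* wake the parked thread (elided) */ } };
    struct Waiter { Event* event; void* thread; };
    struct Mon    { std::atomic<void*> owner{nullptr}; std::atomic<void*> succ{nullptr}; };

    void exit_epilog(Mon* m, Waiter* wakee) {
      m->succ.store(wakee->thread);                          // heir-presumptive hint
      Event* trigger = wakee->event;                         // copy out what we need ...
      wakee = nullptr;                                       // ... the node may vanish once owner is dropped
      m->owner.store(nullptr, std::memory_order_release);    // drop the lock
      std::atomic_thread_fence(std::memory_order_seq_cst);   // ST owner vs LD in unpark()
      trigger->unpark();                                     // the event is assumed to outlive the node
    }
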
 
 
@@ -1337,41 +1337,41 @@
 // inflated monitor, e.g. the monitor can be inflated by a non-owning
 // thread due to contention.
 intptr_t ObjectMonitor::complete_exit(TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   if (THREAD != _owner) {
+  if (THREAD != _owner) {
     if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD;   /* Convert from basiclock addr to Thread addr */
-       _recursions = 0;
-       OwnerIsThread = 1;
+      assert(_recursions == 0, "internal state error");
+      _owner = THREAD;   /* Convert from basiclock addr to Thread addr */
+      _recursions = 0;
+      OwnerIsThread = 1;
     }
-   }
+  }
 
-   guarantee(Self == _owner, "complete_exit not owner");
-   intptr_t save = _recursions; // record the old recursion count
-   _recursions = 0;        // set the recursion level to be 0
-   exit(true, Self);           // exit the monitor
-   guarantee(_owner != Self, "invariant");
-   return save;
+  guarantee(Self == _owner, "complete_exit not owner");
+  intptr_t save = _recursions; // record the old recursion count
+  _recursions = 0;        // set the recursion level to be 0
+  exit(true, Self);           // exit the monitor
+  guarantee(_owner != Self, "invariant");
+  return save;
 }
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   guarantee(_owner != Self, "reenter already owner");
-   enter(THREAD);       // enter the monitor
-   guarantee(_recursions == 0, "reenter recursion");
-   _recursions = recursions;
-   return;
+  guarantee(_owner != Self, "reenter already owner");
+  enter(THREAD);       // enter the monitor
+  guarantee(_recursions == 0, "reenter recursion");
+  _recursions = recursions;
+  return;
 }
 
 
@@ -1412,9 +1412,9 @@
 
 // helper method for posting a monitor wait event
 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
-                                                           jlong notifier_tid,
-                                                           jlong timeout,
-                                                           bool timedout) {
+                                            jlong notifier_tid,
+                                            jlong timeout,
+                                            bool timedout) {
   event->set_klass(((oop)this->object())->klass());
   event->set_timeout((TYPE_ULONG)timeout);
   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
@@ -1429,232 +1429,232 @@
 // Note: a subset of changes to ObjectMonitor::wait()
 // will need to be replicated in complete_exit
 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
+  Thread * const Self = THREAD;
+  assert(Self->is_Java_thread(), "Must be Java thread!");
+  JavaThread *jt = (JavaThread *)THREAD;
 
-   DeferredInitialize();
+  DeferredInitialize();
 
-   // Throw IMSX or IEX.
-   CHECK_OWNER();
+  // Throw IMSX or IEX.
+  CHECK_OWNER();
 
-   EventJavaMonitorWait event;
+  EventJavaMonitorWait event;
 
-   // check for a pending interrupt
-   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-     // post monitor waited event.  Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-        // Note: 'false' parameter is passed here because the
-        // wait was not timed out due to thread interrupt.
-        JvmtiExport::post_monitor_waited(jt, this, false);
+  // check for a pending interrupt
+  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+    // post monitor waited event.  Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      // Note: 'false' parameter is passed here because the
+      // wait was not timed out due to thread interrupt.
+      JvmtiExport::post_monitor_waited(jt, this, false);
 
-        // In this short circuit of the monitor wait protocol, the
-        // current thread never drops ownership of the monitor and
-        // never gets added to the wait queue so the current thread
-        // cannot be made the successor. This means that the
-        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
-        // consume an unpark() meant for the ParkEvent associated with
-        // this ObjectMonitor.
-     }
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, 0, millis, false);
-     }
-     TEVENT(Wait - Throw IEX);
-     THROW(vmSymbols::java_lang_InterruptedException());
-     return;
-   }
+      // In this short circuit of the monitor wait protocol, the
+      // current thread never drops ownership of the monitor and
+      // never gets added to the wait queue so the current thread
+      // cannot be made the successor. This means that the
+      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
+      // consume an unpark() meant for the ParkEvent associated with
+      // this ObjectMonitor.
+    }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, 0, millis, false);
+    }
+    TEVENT(Wait - Throw IEX);
+    THROW(vmSymbols::java_lang_InterruptedException());
+    return;
+  }
 
-   TEVENT(Wait);
+  TEVENT(Wait);
 
-   assert(Self->_Stalled == 0, "invariant");
-   Self->_Stalled = intptr_t(this);
-   jt->set_current_waiting_monitor(this);
+  assert(Self->_Stalled == 0, "invariant");
+  Self->_Stalled = intptr_t(this);
+  jt->set_current_waiting_monitor(this);
 
-   // create a node to be put into the queue
-   // Critically, after we reset() the event but prior to park(), we must check
-   // for a pending interrupt.
-   ObjectWaiter node(Self);
-   node.TState = ObjectWaiter::TS_WAIT;
-   Self->_ParkEvent->reset();
-   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
+  // create a node to be put into the queue
+  // Critically, after we reset() the event but prior to park(), we must check
+  // for a pending interrupt.
+  ObjectWaiter node(Self);
+  node.TState = ObjectWaiter::TS_WAIT;
+  Self->_ParkEvent->reset();
+  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
 
-   // Enter the waiting queue, which is a circular doubly linked list in this case
-   // but it could be a priority queue or any data structure.
-   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
-   // by the the owner of the monitor *except* in the case where park()
-   // returns because of a timeout of interrupt.  Contention is exceptionally rare
-   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+  // Enter the waiting queue, which is a circular doubly linked list in this case
+  // but it could be a priority queue or any data structure.
+  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+  // by the owner of the monitor *except* in the case where park()
+  // returns because of a timeout or interrupt.  Contention is exceptionally rare
+  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
 
-   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
-   AddWaiter(&node);
-   Thread::SpinRelease(&_WaitSetLock);
+  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
+  AddWaiter(&node);
+  Thread::SpinRelease(&_WaitSetLock);
 
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL;
-   }
-   intptr_t save = _recursions; // record the old recursion count
-   _waiters++;                  // increment the number of waiters
-   _recursions = 0;             // set the recursion level to be 1
-   exit(true, Self);                    // exit the monitor
-   guarantee(_owner != Self, "invariant");
+  if ((SyncFlags & 4) == 0) {
+    _Responsible = NULL;
+  }
+  intptr_t save = _recursions; // record the old recursion count
+  _waiters++;                  // increment the number of waiters
+  _recursions = 0;             // set the recursion level to be 0
+  exit(true, Self);                    // exit the monitor
+  guarantee(_owner != Self, "invariant");
 
-   // The thread is on the WaitSet list - now park() it.
-   // On MP systems it's conceivable that a brief spin before we park
-   // could be profitable.
-   //
-   // TODO-FIXME: change the following logic to a loop of the form
-   //   while (!timeout && !interrupted && _notified == 0) park()
+  // The thread is on the WaitSet list - now park() it.
+  // On MP systems it's conceivable that a brief spin before we park
+  // could be profitable.
+  //
+  // TODO-FIXME: change the following logic to a loop of the form
+  //   while (!timeout && !interrupted && _notified == 0) park()
 
-   int ret = OS_OK;
-   int WasNotified = 0;
-   { // State transition wrappers
-     OSThread* osthread = Self->osthread();
-     OSThreadWaitState osts(osthread, true);
-     {
-       ThreadBlockInVM tbivm(jt);
-       // Thread is in thread_blocked state and oop access is unsafe.
-       jt->set_suspend_equivalent();
+  int ret = OS_OK;
+  int WasNotified = 0;
+  { // State transition wrappers
+    OSThread* osthread = Self->osthread();
+    OSThreadWaitState osts(osthread, true);
+    {
+      ThreadBlockInVM tbivm(jt);
+      // Thread is in thread_blocked state and oop access is unsafe.
+      jt->set_suspend_equivalent();
 
-       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
-           // Intentionally empty
-       } else
-       if (node._notified == 0) {
-         if (millis <= 0) {
-            Self->_ParkEvent->park();
-         } else {
-            ret = Self->_ParkEvent->park(millis);
-         }
-       }
+      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+        // Intentionally empty
+      } else
+      if (node._notified == 0) {
+        if (millis <= 0) {
+          Self->_ParkEvent->park();
+        } else {
+          ret = Self->_ParkEvent->park(millis);
+        }
+      }
 
-       // were we externally suspended while we were waiting?
-       if (ExitSuspendEquivalent (jt)) {
-          // TODO-FIXME: add -- if succ == Self then succ = null.
-          jt->java_suspend_self();
-       }
+      // were we externally suspended while we were waiting?
+      if (ExitSuspendEquivalent (jt)) {
+        // TODO-FIXME: add -- if succ == Self then succ = null.
+        jt->java_suspend_self();
+      }
 
-     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
 
 
-     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
-     // from the WaitSet to the EntryList.
-     // See if we need to remove Node from the WaitSet.
-     // We use double-checked locking to avoid grabbing _WaitSetLock
-     // if the thread is not on the wait queue.
-     //
-     // Note that we don't need a fence before the fetch of TState.
-     // In the worst case we'll fetch a old-stale value of TS_WAIT previously
-     // written by the is thread. (perhaps the fetch might even be satisfied
-     // by a look-aside into the processor's own store buffer, although given
-     // the length of the code path between the prior ST and this load that's
-     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
-     // then we'll acquire the lock and then re-fetch a fresh TState value.
-     // That is, we fail toward safety.
+    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+    // from the WaitSet to the EntryList.
+    // See if we need to remove Node from the WaitSet.
+    // We use double-checked locking to avoid grabbing _WaitSetLock
+    // if the thread is not on the wait queue.
+    //
+    // Note that we don't need a fence before the fetch of TState.
+    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
+    // written by this thread. (perhaps the fetch might even be satisfied
+    // by a look-aside into the processor's own store buffer, although given
+    // the length of the code path between the prior ST and this load that's
+    // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+    // then we'll acquire the lock and then re-fetch a fresh TState value.
+    // That is, we fail toward safety.
 
-     if (node.TState == ObjectWaiter::TS_WAIT) {
-         Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
-         if (node.TState == ObjectWaiter::TS_WAIT) {
-            DequeueSpecificWaiter(&node);       // unlink from WaitSet
-            assert(node._notified == 0, "invariant");
-            node.TState = ObjectWaiter::TS_RUN;
-         }
-         Thread::SpinRelease(&_WaitSetLock);
-     }
+    if (node.TState == ObjectWaiter::TS_WAIT) {
+      Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
+      if (node.TState == ObjectWaiter::TS_WAIT) {
+        DequeueSpecificWaiter(&node);       // unlink from WaitSet
+        assert(node._notified == 0, "invariant");
+        node.TState = ObjectWaiter::TS_RUN;
+      }
+      Thread::SpinRelease(&_WaitSetLock);
+    }
 
-     // The thread is now either on off-list (TS_RUN),
-     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
-     // The Node's TState variable is stable from the perspective of this thread.
-     // No other threads will asynchronously modify TState.
-     guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
-     OrderAccess::loadload();
-     if (_succ == Self) _succ = NULL;
-     WasNotified = node._notified;
+    // The thread is now either off-list (TS_RUN),
+    // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+    // The Node's TState variable is stable from the perspective of this thread.
+    // No other threads will asynchronously modify TState.
+    guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
+    OrderAccess::loadload();
+    if (_succ == Self) _succ = NULL;
+    WasNotified = node._notified;
 
-     // Reentry phase -- reacquire the monitor.
-     // re-enter contended monitor after object.wait().
-     // retain OBJECT_WAIT state until re-enter successfully completes
-     // Thread state is thread_in_vm and oop access is again safe,
-     // although the raw address of the object may have changed.
-     // (Don't cache naked oops over safepoints, of course).
+    // Reentry phase -- reacquire the monitor.
+    // re-enter contended monitor after object.wait().
+    // retain OBJECT_WAIT state until re-enter successfully completes
+    // Thread state is thread_in_vm and oop access is again safe,
+    // although the raw address of the object may have changed.
+    // (Don't cache naked oops over safepoints, of course).
 
-     // post monitor waited event. Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+    // post monitor waited event. Note that this is past-tense, we are done waiting.
+    if (JvmtiExport::should_post_monitor_waited()) {
+      JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 
-       if (node._notified != 0 && _succ == Self) {
-         // In this part of the monitor wait-notify-reenter protocol it
-         // is possible (and normal) for another thread to do a fastpath
-         // monitor enter-exit while this thread is still trying to get
-         // to the reenter portion of the protocol.
-         //
-         // The ObjectMonitor was notified and the current thread is
-         // the successor which also means that an unpark() has already
-         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
-         // consume the unpark() that was done when the successor was
-         // set because the same ParkEvent is shared between Java
-         // monitors and JVM/TI RawMonitors (for now).
-         //
-         // We redo the unpark() to ensure forward progress, i.e., we
-         // don't want all pending threads hanging (parked) with none
-         // entering the unlocked monitor.
-         node._event->unpark();
-       }
-     }
+      if (node._notified != 0 && _succ == Self) {
+        // In this part of the monitor wait-notify-reenter protocol it
+        // is possible (and normal) for another thread to do a fastpath
+        // monitor enter-exit while this thread is still trying to get
+        // to the reenter portion of the protocol.
+        //
+        // The ObjectMonitor was notified and the current thread is
+        // the successor which also means that an unpark() has already
+        // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+        // consume the unpark() that was done when the successor was
+        // set because the same ParkEvent is shared between Java
+        // monitors and JVM/TI RawMonitors (for now).
+        //
+        // We redo the unpark() to ensure forward progress, i.e., we
+        // don't want all pending threads hanging (parked) with none
+        // entering the unlocked monitor.
+        node._event->unpark();
+      }
+    }
 
-     if (event.should_commit()) {
-       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
-     }
+    if (event.should_commit()) {
+      post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+    }
 
-     OrderAccess::fence();
+    OrderAccess::fence();
 
-     assert(Self->_Stalled != 0, "invariant");
-     Self->_Stalled = 0;
+    assert(Self->_Stalled != 0, "invariant");
+    Self->_Stalled = 0;
 
-     assert(_owner != Self, "invariant");
-     ObjectWaiter::TStates v = node.TState;
-     if (v == ObjectWaiter::TS_RUN) {
-         enter(Self);
-     } else {
-         guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
-         ReenterI(Self, &node);
-         node.wait_reenter_end(this);
-     }
+    assert(_owner != Self, "invariant");
+    ObjectWaiter::TStates v = node.TState;
+    if (v == ObjectWaiter::TS_RUN) {
+      enter(Self);
+    } else {
+      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
+      ReenterI(Self, &node);
+      node.wait_reenter_end(this);
+    }
 
-     // Self has reacquired the lock.
-     // Lifecycle - the node representing Self must not appear on any queues.
-     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
-     // want residual elements associated with this thread left on any lists.
-     guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
-     assert(_owner == Self, "invariant");
-     assert(_succ != Self , "invariant");
-   } // OSThreadWaitState()
+    // Self has reacquired the lock.
+    // Lifecycle - the node representing Self must not appear on any queues.
+    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+    // want residual elements associated with this thread left on any lists.
+    guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
+    assert(_owner == Self, "invariant");
+    assert(_succ != Self , "invariant");
+  } // OSThreadWaitState()
 
-   jt->set_current_waiting_monitor(NULL);
+  jt->set_current_waiting_monitor(NULL);
 
-   guarantee(_recursions == 0, "invariant");
-   _recursions = save;     // restore the old recursion count
-   _waiters--;             // decrement the number of waiters
+  guarantee(_recursions == 0, "invariant");
+  _recursions = save;     // restore the old recursion count
+  _waiters--;             // decrement the number of waiters
 
-   // Verify a few postconditions
-   assert(_owner == Self       , "invariant");
-   assert(_succ  != Self       , "invariant");
-   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  // Verify a few postconditions
+  assert(_owner == Self       , "invariant");
+  assert(_succ  != Self       , "invariant");
+  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 
-   if (SyncFlags & 32) {
-      OrderAccess::fence();
-   }
+  if (SyncFlags & 32) {
+    OrderAccess::fence();
+  }
 
-   // check if the notification happened
-   if (!WasNotified) {
-     // no, it could be timeout or Thread.interrupt() or both
-     // check for interrupt event, otherwise it is timeout
-     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-       TEVENT(Wait - throw IEX from epilog);
-       THROW(vmSymbols::java_lang_InterruptedException());
-     }
-   }
+  // check if the notification happened
+  if (!WasNotified) {
+    // no, it could be timeout or Thread.interrupt() or both
+    // check for interrupt event, otherwise it is timeout
+    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+      TEVENT(Wait - throw IEX from epilog);
+      THROW(vmSymbols::java_lang_InterruptedException());
+    }
+  }
 
-   // NOTE: Spurious wake up will be consider as timeout.
-   // Monitor notify has precedence over thread interrupt.
+  // NOTE: A spurious wake up will be considered as a timeout.
+  // Monitor notify has precedence over thread interrupt.
 }
 
 
@@ -1666,8 +1666,8 @@
 void ObjectMonitor::notify(TRAPS) {
   CHECK_OWNER();
   if (_WaitSet == NULL) {
-     TEVENT(Empty-Notify);
-     return;
+    TEVENT(Empty-Notify);
+    return;
   }
   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
 
@@ -1676,108 +1676,108 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
   ObjectWaiter * iterator = DequeueWaiter();
   if (iterator != NULL) {
-     TEVENT(Notify1 - Transfer);
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
+    TEVENT(Notify1 - Transfer);
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
 
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
-        }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
-        }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            iterator->TState = ObjectWaiter::TS_CXQ;
-            for (;;) {
-                ObjectWaiter * Front = _cxq;
-                iterator->_next = Front;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                    break;
-                }
-            }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else
+    if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 2) {      // prepend to cxq
+      // prepend to cxq
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
         iterator->TState = ObjectWaiter::TS_CXQ;
         for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
+          ObjectWaiter * Front = _cxq;
+          iterator->_next = Front;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+            break;
+          }
         }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else
+    if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
+        }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of  wait() timeouts and interrupts the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc();
+    ObjectMonitor::_sync_Notifications->inc();
   }
 }
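Of the Policy branches above, Policy == 2 pushes the notified waiter onto the front of the cxq with a retry loop around Atomic::cmpxchg_ptr. The standalone sketch below shows that push using std::atomic in place of the VM primitive; Node and cxq here are illustrative stand-ins for ObjectWaiter and _cxq.

    #include <atomic>
    #include <cassert>

    struct Node { Node* next = nullptr; };       // stand-in for ObjectWaiter

    std::atomic<Node*> cxq{nullptr};             // stand-in for _cxq

    // Prepend n to the cxq: link n ahead of the observed front and retry the
    // CAS until n is installed as the new head, as in the Policy == 2 branch.
    void prepend_to_cxq(Node* n) {
      Node* front = cxq.load(std::memory_order_relaxed);
      do {
        n->next = front;                         // n points at the current front
      } while (!cxq.compare_exchange_weak(front, n,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
    }

    int main() {
      Node a, b;
      prepend_to_cxq(&a);
      prepend_to_cxq(&b);
      assert(cxq.load() == &b && b.next == &a);  // b was pushed ahead of a
      return 0;
    }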
 
@@ -1786,8 +1786,8 @@
   CHECK_OWNER();
   ObjectWaiter* iterator;
   if (_WaitSet == NULL) {
-      TEVENT(Empty-NotifyAll);
-      return;
+    TEVENT(Empty-NotifyAll);
+    return;
   }
   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
 
@@ -1796,112 +1796,112 @@
   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notifyall");
 
   for (;;) {
-     iterator = DequeueWaiter();
-     if (iterator == NULL) break;
-     TEVENT(NotifyAll - Transfer1);
-     ++Tally;
+    iterator = DequeueWaiter();
+    if (iterator == NULL) break;
+    TEVENT(NotifyAll - Transfer1);
+    ++Tally;
+
+    // Disposition - what might we do with iterator ?
+    // a.  add it directly to the EntryList - either tail or head.
+    // b.  push it onto the front of the _cxq.
+    // For now we use (a).
 
-     // Disposition - what might we do with iterator ?
-     // a.  add it directly to the EntryList - either tail or head.
-     // b.  push it onto the front of the _cxq.
-     // For now we use (a).
+    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    guarantee(iterator->_notified == 0, "invariant");
+    iterator->_notified = 1;
+    Thread * Self = THREAD;
+    iterator->_notifier_tid = Self->osthread()->thread_id();
+    if (Policy != 4) {
+      iterator->TState = ObjectWaiter::TS_ENTER;
+    }
+
+    ObjectWaiter * List = _EntryList;
+    if (List != NULL) {
+      assert(List->_prev == NULL, "invariant");
+      assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
+      assert(List != iterator, "invariant");
+    }
 
-     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
-     guarantee(iterator->_notified == 0, "invariant");
-     iterator->_notified = 1;
-     Thread * Self = THREAD;
-     iterator->_notifier_tid = Self->osthread()->thread_id();
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER;
-     }
-
-     ObjectWaiter * List = _EntryList;
-     if (List != NULL) {
-        assert(List->_prev == NULL, "invariant");
-        assert(List->TState == ObjectWaiter::TS_ENTER, "invariant");
-        assert(List != iterator, "invariant");
-     }
-
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-             List->_prev = iterator;
-             iterator->_next = List;
-             iterator->_prev = NULL;
-             _EntryList = iterator;
+    if (Policy == 0) {       // prepend to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        List->_prev = iterator;
+        iterator->_next = List;
+        iterator->_prev = NULL;
+        _EntryList = iterator;
+      }
+    } else
+    if (Policy == 1) {      // append to EntryList
+      if (List == NULL) {
+        iterator->_next = iterator->_prev = NULL;
+        _EntryList = iterator;
+      } else {
+        // CONSIDER:  finding the tail currently requires a linear-time walk of
+        // the EntryList.  We can make tail access constant-time by converting to
+        // a CDLL instead of using our current DLL.
+        ObjectWaiter * Tail;
+        for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
+        assert(Tail != NULL && Tail->_next == NULL, "invariant");
+        Tail->_next = iterator;
+        iterator->_prev = Tail;
+        iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 2) {      // prepend to cxq
+      // prepend to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Front = _cxq;
+        iterator->_next = Front;
+        if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+          break;
         }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL;
-             _EntryList = iterator;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail;
-            for (Tail = List; Tail->_next != NULL; Tail = Tail->_next);
-            assert(Tail != NULL && Tail->_next == NULL, "invariant");
-            Tail->_next = iterator;
-            iterator->_prev = Tail;
-            iterator->_next = NULL;
+      }
+    } else
+    if (Policy == 3) {      // append to cxq
+      iterator->TState = ObjectWaiter::TS_CXQ;
+      for (;;) {
+        ObjectWaiter * Tail;
+        Tail = _cxq;
+        if (Tail == NULL) {
+          iterator->_next = NULL;
+          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+            break;
+          }
+        } else {
+          while (Tail->_next != NULL) Tail = Tail->_next;
+          Tail->_next = iterator;
+          iterator->_prev = Tail;
+          iterator->_next = NULL;
+          break;
         }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         // prepend to cxq
-         iterator->TState = ObjectWaiter::TS_CXQ;
-         for (;;) {
-             ObjectWaiter * Front = _cxq;
-             iterator->_next = Front;
-             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                 break;
-             }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
-        iterator->TState = ObjectWaiter::TS_CXQ;
-        for (;;) {
-            ObjectWaiter * Tail;
-            Tail = _cxq;
-            if (Tail == NULL) {
-                iterator->_next = NULL;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next;
-                Tail->_next = iterator;
-                iterator->_prev = Tail;
-                iterator->_next = NULL;
-                break;
-            }
-        }
-     } else {
-        ParkEvent * ev = iterator->_event;
-        iterator->TState = ObjectWaiter::TS_RUN;
-        OrderAccess::fence();
-        ev->unpark();
-     }
+      }
+    } else {
+      ParkEvent * ev = iterator->_event;
+      iterator->TState = ObjectWaiter::TS_RUN;
+      OrderAccess::fence();
+      ev->unpark();
+    }
 
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
+    if (Policy < 4) {
+      iterator->wait_reenter_begin(this);
+    }
 
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
+    // _WaitSetLock protects the wait queue, not the EntryList.  We could
+    // move the add-to-EntryList operation, above, outside the critical section
+    // protected by _WaitSetLock.  In practice that's not useful.  With the
+    // exception of  wait() timeouts and interrupts the monitor owner
+    // is the only thread that grabs _WaitSetLock.  There's almost no contention
+    // on _WaitSetLock so it's not profitable to reduce the length of the
+    // critical section.
   }
 
   Thread::SpinRelease(&_WaitSetLock);
 
   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
-     ObjectMonitor::_sync_Notifications->inc(Tally);
+    ObjectMonitor::_sync_Notifications->inc(Tally);
   }
 }
 
@@ -1979,227 +1979,227 @@
 
 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
 
-    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
-    int ctr = Knob_FixedSpin;
-    if (ctr != 0) {
-        while (--ctr >= 0) {
-            if (TryLock(Self) > 0) return 1;
-            SpinPause();
-        }
-        return 0;
+  // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
+  int ctr = Knob_FixedSpin;
+  if (ctr != 0) {
+    while (--ctr >= 0) {
+      if (TryLock(Self) > 0) return 1;
+      SpinPause();
+    }
+    return 0;
+  }
+
+  for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
+    if (TryLock(Self) > 0) {
+      // Increase _SpinDuration ...
+      // Note that we don't clamp SpinDuration precisely at SpinLimit.
+      // Raising _SpinDuration to the poverty line is key.
+      int x = _SpinDuration;
+      if (x < Knob_SpinLimit) {
+        if (x < Knob_Poverty) x = Knob_Poverty;
+        _SpinDuration = x + Knob_BonusB;
+      }
+      return 1;
+    }
+    SpinPause();
+  }
+
+  // Admission control - verify preconditions for spinning
+  //
+  // We always spin a little bit, just to prevent _SpinDuration == 0 from
+  // becoming an absorbing state.  Put another way, we spin briefly to
+  // sample, just in case the system load, parallelism, contention, or lock
+  // modality changed.
+  //
+  // Consider the following alternative:
+  // Periodically set _SpinDuration = _SpinLimit and try a long/full
+  // spin attempt.  "Periodically" might mean after a tally of
+  // the # of failed spin attempts (or iterations) reaches some threshold.
+  // This takes us into the realm of 1-out-of-N spinning, where we
+  // hold the duration constant but vary the frequency.
+
+  ctr = _SpinDuration;
+  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
+  if (ctr <= 0) return 0;
+
+  if (Knob_SuccRestrict && _succ != NULL) return 0;
+  if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
+    TEVENT(Spin abort - notrunnable [TOP]);
+    return 0;
+  }
+
+  int MaxSpin = Knob_MaxSpinners;
+  if (MaxSpin >= 0) {
+    if (_Spinner > MaxSpin) {
+      TEVENT(Spin abort -- too many spinners);
+      return 0;
+    }
+    // Slightly racy, but benign ...
+    Adjust(&_Spinner, 1);
+  }
+
+  // We're good to spin ... spin ingress.
+  // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+  // when preparing to LD...CAS _owner, etc and the CAS is likely
+  // to succeed.
+  int hits    = 0;
+  int msk     = 0;
+  int caspty  = Knob_CASPenalty;
+  int oxpty   = Knob_OXPenalty;
+  int sss     = Knob_SpinSetSucc;
+  if (sss && _succ == NULL) _succ = Self;
+  Thread * prv = NULL;
+
+  // There are three ways to exit the following loop:
+  // 1.  A successful spin where this thread has acquired the lock.
+  // 2.  Spin failure with prejudice
+  // 3.  Spin failure without prejudice
+
+  while (--ctr >= 0) {
+
+    // Periodic polling -- Check for pending GC
+    // Threads may spin while they're unsafe.
+    // We don't want spinning threads to delay the JVM from reaching
+    // a stop-the-world safepoint or to steal cycles from GC.
+    // If we detect a pending safepoint we abort in order that
+    // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+    // this thread, if safe, doesn't steal cycles from GC.
+    // This is in keeping with the "no loitering in runtime" rule.
+    // We periodically check to see if there's a safepoint pending.
+    if ((ctr & 0xFF) == 0) {
+      if (SafepointSynchronize::do_call_back()) {
+        TEVENT(Spin: safepoint);
+        goto Abort;           // abrupt spin egress
+      }
+      if (Knob_UsePause & 1) SpinPause();
+
+      int (*scb)(intptr_t,int) = SpinCallbackFunction;
+      if (hits > 50 && scb != NULL) {
+        int abend = (*scb)(SpinCallbackArgument, 0);
+      }
     }
 
-    for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
-      if (TryLock(Self) > 0) {
-        // Increase _SpinDuration ...
+    if (Knob_UsePause & 2) SpinPause();
+
+    // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
+    // This is useful on classic SMP systems, but is of less utility on
+    // N1-style CMT platforms.
+    //
+    // Trade-off: lock acquisition latency vs coherency bandwidth.
+    // Lock hold times are typically short.  A histogram
+    // of successful spin attempts shows that we usually acquire
+    // the lock early in the spin.  That suggests we want to
+    // sample _owner frequently in the early phase of the spin,
+    // but then back-off and sample less frequently as the spin
+    // progresses.  The back-off makes us a good citizen on big
+    // SMP systems.  Oversampling _owner can consume excessive
+    // coherency bandwidth.  Relatedly, if we _oversample _owner we
+    // can inadvertently interfere with the ST m->owner=null
+    // executed by the lock owner.
+    if (ctr & msk) continue;
+    ++hits;
+    if ((hits & 0xF) == 0) {
+      // The 0xF, above, corresponds to the exponent.
+      // Consider: (msk+1)|msk
+      msk = ((msk << 2)|3) & BackOffMask;
+    }
+
+    // Probe _owner with TATAS
+    // If this thread observes the monitor transition or flicker
+    // from locked to unlocked to locked, then the odds that this
+    // thread will acquire the lock in this spin attempt go down
+    // considerably.  The same argument applies if the CAS fails
+    // or if we observe _owner change from one non-null value to
+    // another non-null value.   In such cases we might abort
+    // the spin without prejudice or apply a "penalty" to the
+    // spin count-down variable "ctr", reducing it by 100, say.
+
+    Thread * ox = (Thread *) _owner;
+    if (ox == NULL) {
+      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+      if (ox == NULL) {
+        // The CAS succeeded -- this thread acquired ownership
+        // Take care of some bookkeeping to exit spin state.
+        if (sss && _succ == Self) {
+          _succ = NULL;
+        }
+        if (MaxSpin > 0) Adjust(&_Spinner, -1);
+
+        // Increase _SpinDuration :
+        // The spin was successful (profitable) so we tend toward
+        // longer spin attempts in the future.
+        // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+        // If we acquired the lock early in the spin cycle it
+        // makes sense to increase _SpinDuration proportionally.
         // Note that we don't clamp SpinDuration precisely at SpinLimit.
-        // Raising _SpurDuration to the poverty line is key.
         int x = _SpinDuration;
         if (x < Knob_SpinLimit) {
-           if (x < Knob_Poverty) x = Knob_Poverty;
-           _SpinDuration = x + Knob_BonusB;
+          if (x < Knob_Poverty) x = Knob_Poverty;
+          _SpinDuration = x + Knob_Bonus;
         }
         return 1;
       }
-      SpinPause();
-    }
 
-    // Admission control - verify preconditions for spinning
-    //
-    // We always spin a little bit, just to prevent _SpinDuration == 0 from
-    // becoming an absorbing state.  Put another way, we spin briefly to
-    // sample, just in case the system load, parallelism, contention, or lock
-    // modality changed.
-    //
-    // Consider the following alternative:
-    // Periodically set _SpinDuration = _SpinLimit and try a long/full
-    // spin attempt.  "Periodically" might mean after a tally of
-    // the # of failed spin attempts (or iterations) reaches some threshold.
-    // This takes us into the realm of 1-out-of-N spinning, where we
-    // hold the duration constant but vary the frequency.
-
-    ctr = _SpinDuration;
-    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
-    if (ctr <= 0) return 0;
-
-    if (Knob_SuccRestrict && _succ != NULL) return 0;
-    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
-       TEVENT(Spin abort - notrunnable [TOP]);
-       return 0;
-    }
-
-    int MaxSpin = Knob_MaxSpinners;
-    if (MaxSpin >= 0) {
-       if (_Spinner > MaxSpin) {
-          TEVENT(Spin abort -- too many spinners);
-          return 0;
-       }
-       // Slightly racy, but benign ...
-       Adjust(&_Spinner, 1);
+      // The CAS failed ... we can take any of the following actions:
+      // * penalize: ctr -= Knob_CASPenalty
+      // * exit spin with prejudice -- goto Abort;
+      // * exit spin without prejudice.
+      // * Since CAS is high-latency, retry again immediately.
+      prv = ox;
+      TEVENT(Spin: cas failed);
+      if (caspty == -2) break;
+      if (caspty == -1) goto Abort;
+      ctr -= caspty;
+      continue;
     }
 
-    // We're good to spin ... spin ingress.
-    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
-    // when preparing to LD...CAS _owner, etc and the CAS is likely
-    // to succeed.
-    int hits    = 0;
-    int msk     = 0;
-    int caspty  = Knob_CASPenalty;
-    int oxpty   = Knob_OXPenalty;
-    int sss     = Knob_SpinSetSucc;
-    if (sss && _succ == NULL) _succ = Self;
-    Thread * prv = NULL;
-
-    // There are three ways to exit the following loop:
-    // 1.  A successful spin where this thread has acquired the lock.
-    // 2.  Spin failure with prejudice
-    // 3.  Spin failure without prejudice
-
-    while (--ctr >= 0) {
-
-      // Periodic polling -- Check for pending GC
-      // Threads may spin while they're unsafe.
-      // We don't want spinning threads to delay the JVM from reaching
-      // a stop-the-world safepoint or to steal cycles from GC.
-      // If we detect a pending safepoint we abort in order that
-      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
-      // this thread, if safe, doesn't steal cycles from GC.
-      // This is in keeping with the "no loitering in runtime" rule.
-      // We periodically check to see if there's a safepoint pending.
-      if ((ctr & 0xFF) == 0) {
-         if (SafepointSynchronize::do_call_back()) {
-            TEVENT(Spin: safepoint);
-            goto Abort;           // abrupt spin egress
-         }
-         if (Knob_UsePause & 1) SpinPause();
-
-         int (*scb)(intptr_t,int) = SpinCallbackFunction;
-         if (hits > 50 && scb != NULL) {
-            int abend = (*scb)(SpinCallbackArgument, 0);
-         }
-      }
-
-      if (Knob_UsePause & 2) SpinPause();
-
-      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
-      // This is useful on classic SMP systems, but is of less utility on
-      // N1-style CMT platforms.
-      //
-      // Trade-off: lock acquisition latency vs coherency bandwidth.
-      // Lock hold times are typically short.  A histogram
-      // of successful spin attempts shows that we usually acquire
-      // the lock early in the spin.  That suggests we want to
-      // sample _owner frequently in the early phase of the spin,
-      // but then back-off and sample less frequently as the spin
-      // progresses.  The back-off makes a good citizen on SMP big
-      // SMP systems.  Oversampling _owner can consume excessive
-      // coherency bandwidth.  Relatedly, if we _oversample _owner we
-      // can inadvertently interfere with the the ST m->owner=null.
-      // executed by the lock owner.
-      if (ctr & msk) continue;
-      ++hits;
-      if ((hits & 0xF) == 0) {
-        // The 0xF, above, corresponds to the exponent.
-        // Consider: (msk+1)|msk
-        msk = ((msk << 2)|3) & BackOffMask;
-      }
+    // Did lock ownership change hands ?
+    if (ox != prv && prv != NULL) {
+      TEVENT(spin: Owner changed)
+      if (oxpty == -2) break;
+      if (oxpty == -1) goto Abort;
+      ctr -= oxpty;
+    }
+    prv = ox;
 
-      // Probe _owner with TATAS
-      // If this thread observes the monitor transition or flicker
-      // from locked to unlocked to locked, then the odds that this
-      // thread will acquire the lock in this spin attempt go down
-      // considerably.  The same argument applies if the CAS fails
-      // or if we observe _owner change from one non-null value to
-      // another non-null value.   In such cases we might abort
-      // the spin without prejudice or apply a "penalty" to the
-      // spin count-down variable "ctr", reducing it by 100, say.
-
-      Thread * ox = (Thread *) _owner;
-      if (ox == NULL) {
-         ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
-         if (ox == NULL) {
-            // The CAS succeeded -- this thread acquired ownership
-            // Take care of some bookkeeping to exit spin state.
-            if (sss && _succ == Self) {
-               _succ = NULL;
-            }
-            if (MaxSpin > 0) Adjust(&_Spinner, -1);
-
-            // Increase _SpinDuration :
-            // The spin was successful (profitable) so we tend toward
-            // longer spin attempts in the future.
-            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
-            // If we acquired the lock early in the spin cycle it
-            // makes sense to increase _SpinDuration proportionally.
-            // Note that we don't clamp SpinDuration precisely at SpinLimit.
-            int x = _SpinDuration;
-            if (x < Knob_SpinLimit) {
-                if (x < Knob_Poverty) x = Knob_Poverty;
-                _SpinDuration = x + Knob_Bonus;
-            }
-            return 1;
-         }
+    // Abort the spin if the owner is not executing.
+    // The owner must be executing in order to drop the lock.
+    // Spinning while the owner is OFFPROC is idiocy.
+    // Consider: ctr -= RunnablePenalty ;
+    if (Knob_OState && NotRunnable (Self, ox)) {
+      TEVENT(Spin abort - notrunnable);
+      goto Abort;
+    }
+    if (sss && _succ == NULL) _succ = Self;
+  }
 
-         // The CAS failed ... we can take any of the following actions:
-         // * penalize: ctr -= Knob_CASPenalty
-         // * exit spin with prejudice -- goto Abort;
-         // * exit spin without prejudice.
-         // * Since CAS is high-latency, retry again immediately.
-         prv = ox;
-         TEVENT(Spin: cas failed);
-         if (caspty == -2) break;
-         if (caspty == -1) goto Abort;
-         ctr -= caspty;
-         continue;
-      }
-
-      // Did lock ownership change hands ?
-      if (ox != prv && prv != NULL) {
-          TEVENT(spin: Owner changed)
-          if (oxpty == -2) break;
-          if (oxpty == -1) goto Abort;
-          ctr -= oxpty;
-      }
-      prv = ox;
-
-      // Abort the spin if the owner is not executing.
-      // The owner must be executing in order to drop the lock.
-      // Spinning while the owner is OFFPROC is idiocy.
-      // Consider: ctr -= RunnablePenalty ;
-      if (Knob_OState && NotRunnable (Self, ox)) {
-         TEVENT(Spin abort - notrunnable);
-         goto Abort;
-      }
-      if (sss && _succ == NULL) _succ = Self;
-   }
-
-   // Spin failed with prejudice -- reduce _SpinDuration.
-   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
-   // AIMD is globally stable.
-   TEVENT(Spin failure);
-   {
-     int x = _SpinDuration;
-     if (x > 0) {
-        // Consider an AIMD scheme like: x -= (x >> 3) + 100
-        // This is globally sample and tends to damp the response.
-        x -= Knob_Penalty;
-        if (x < 0) x = 0;
-        _SpinDuration = x;
-     }
-   }
+  // Spin failed with prejudice -- reduce _SpinDuration.
+  // TODO: Use an AIMD-like policy to adjust _SpinDuration.
+  // AIMD is globally stable.
+  TEVENT(Spin failure);
+  {
+    int x = _SpinDuration;
+    if (x > 0) {
+      // Consider an AIMD scheme like: x -= (x >> 3) + 100
+      // This is globally stable and tends to damp the response.
+      x -= Knob_Penalty;
+      if (x < 0) x = 0;
+      _SpinDuration = x;
+    }
+  }
 
  Abort:
-   if (MaxSpin >= 0) Adjust(&_Spinner, -1);
-   if (sss && _succ == Self) {
-      _succ = NULL;
-      // Invariant: after setting succ=null a contending thread
-      // must recheck-retry _owner before parking.  This usually happens
-      // in the normal usage of TrySpin(), but it's safest
-      // to make TrySpin() as foolproof as possible.
-      OrderAccess::fence();
-      if (TryLock(Self) > 0) return 1;
-   }
-   return 0;
+  if (MaxSpin >= 0) Adjust(&_Spinner, -1);
+  if (sss && _succ == Self) {
+    _succ = NULL;
+    // Invariant: after setting succ=null a contending thread
+    // must recheck-retry _owner before parking.  This usually happens
+    // in the normal usage of TrySpin(), but it's safest
+    // to make TrySpin() as foolproof as possible.
+    OrderAccess::fence();
+    if (TryLock(Self) > 0) return 1;
+  }
+  return 0;
 }
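The probe throttling in the spin loop above (skip iterations where ctr & msk is non-zero, widen msk every 16 probes) can be seen in isolation with the small sketch below; BackOffMask and the iteration count are illustrative values, not the VM's tuned knobs.

    #include <cstdio>

    int main() {
      const int BackOffMask = 0x3FF;             // illustrative, not the VM knob
      int msk  = 0;
      int hits = 0;
      for (int ctr = 4096; --ctr >= 0;) {
        if (ctr & msk) continue;                 // back off: skip this iteration
        ++hits;                                  // this iteration would probe _owner
        if ((hits & 0xF) == 0) {
          // Every 16 probes, widen the mask so later probes are rarer,
          // mirroring msk = ((msk << 2)|3) & BackOffMask in TrySpin.
          msk = ((msk << 2) | 3) & BackOffMask;
        }
      }
      std::printf("probes issued over 4096 iterations: %d\n", hits);
      return 0;
    }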
 
 // NotRunnable() -- informed spinning
@@ -2242,29 +2242,29 @@
 
 
 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
-    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
-    if (!OwnerIsThread) return 0;
+  // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+  if (!OwnerIsThread) return 0;
 
-    if (ox == NULL) return 0;
+  if (ox == NULL) return 0;
 
-    // Avoid transitive spinning ...
-    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
-    // Immediately after T1 acquires L it's possible that T2, also
-    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
-    // This occurs transiently after T1 acquired L but before
-    // T1 managed to clear T1.Stalled.  T2 does not need to abort
-    // its spin in this circumstance.
-    intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
+  // Avoid transitive spinning ...
+  // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
+  // Immediately after T1 acquires L it's possible that T2, also
+  // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+  // This occurs transiently after T1 acquired L but before
+  // T1 managed to clear T1.Stalled.  T2 does not need to abort
+  // its spin in this circumstance.
+  intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
 
-    if (BlockedOn == 1) return 1;
-    if (BlockedOn != 0) {
-      return BlockedOn != intptr_t(this) && _owner == ox;
-    }
+  if (BlockedOn == 1) return 1;
+  if (BlockedOn != 0) {
+    return BlockedOn != intptr_t(this) && _owner == ox;
+  }
 
-    assert(sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant");
-    int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);;
-    // consider also: jst != _thread_in_Java -- but that's overspecific.
-    return jst == _thread_blocked || jst == _thread_in_native;
+  assert(sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant");
+  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);
+  // consider also: jst != _thread_in_Java -- but that's overspecific.
+  return jst == _thread_blocked || jst == _thread_in_native;
 }
 
 
@@ -2377,27 +2377,27 @@
   assert(InitializationCompleted == 0, "invariant");
   InitializationCompleted = 1;
   if (UsePerfData) {
-      EXCEPTION_MARK;
+    EXCEPTION_MARK;
       #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
       #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      NEWPERFCOUNTER(_sync_Inflations);
-      NEWPERFCOUNTER(_sync_Deflations);
-      NEWPERFCOUNTER(_sync_ContendedLockAttempts);
-      NEWPERFCOUNTER(_sync_FutileWakeups);
-      NEWPERFCOUNTER(_sync_Parks);
-      NEWPERFCOUNTER(_sync_EmptyNotifications);
-      NEWPERFCOUNTER(_sync_Notifications);
-      NEWPERFCOUNTER(_sync_SlowEnter);
-      NEWPERFCOUNTER(_sync_SlowExit);
-      NEWPERFCOUNTER(_sync_SlowNotify);
-      NEWPERFCOUNTER(_sync_SlowNotifyAll);
-      NEWPERFCOUNTER(_sync_FailedSpins);
-      NEWPERFCOUNTER(_sync_SuccessfulSpins);
-      NEWPERFCOUNTER(_sync_PrivateA);
-      NEWPERFCOUNTER(_sync_PrivateB);
-      NEWPERFCOUNTER(_sync_MonInCirculation);
-      NEWPERFCOUNTER(_sync_MonScavenged);
-      NEWPERFVARIABLE(_sync_MonExtant);
+    NEWPERFCOUNTER(_sync_Inflations);
+    NEWPERFCOUNTER(_sync_Deflations);
+    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
+    NEWPERFCOUNTER(_sync_FutileWakeups);
+    NEWPERFCOUNTER(_sync_Parks);
+    NEWPERFCOUNTER(_sync_EmptyNotifications);
+    NEWPERFCOUNTER(_sync_Notifications);
+    NEWPERFCOUNTER(_sync_SlowEnter);
+    NEWPERFCOUNTER(_sync_SlowExit);
+    NEWPERFCOUNTER(_sync_SlowNotify);
+    NEWPERFCOUNTER(_sync_SlowNotifyAll);
+    NEWPERFCOUNTER(_sync_FailedSpins);
+    NEWPERFCOUNTER(_sync_SuccessfulSpins);
+    NEWPERFCOUNTER(_sync_PrivateA);
+    NEWPERFCOUNTER(_sync_PrivateB);
+    NEWPERFCOUNTER(_sync_MonInCirculation);
+    NEWPERFCOUNTER(_sync_MonScavenged);
+    NEWPERFVARIABLE(_sync_MonExtant);
       #undef NEWPERFCOUNTER
   }
 }
@@ -2417,33 +2417,33 @@
 
 
 static char * kvGet (char * kvList, const char * Key) {
-    if (kvList == NULL) return NULL;
-    size_t n = strlen(Key);
-    char * Search;
-    for (Search = kvList; *Search; Search += strlen(Search) + 1) {
-        if (strncmp (Search, Key, n) == 0) {
-            if (Search[n] == '=') return Search + n + 1;
-            if (Search[n] == 0)   return(char *) "1";
-        }
+  if (kvList == NULL) return NULL;
+  size_t n = strlen(Key);
+  char * Search;
+  for (Search = kvList; *Search; Search += strlen(Search) + 1) {
+    if (strncmp (Search, Key, n) == 0) {
+      if (Search[n] == '=') return Search + n + 1;
+      if (Search[n] == 0)   return (char *) "1";
     }
-    return NULL;
+  }
+  return NULL;
 }
 
 static int kvGetInt (char * kvList, const char * Key, int Default) {
-    char * v = kvGet(kvList, Key);
-    int rslt = v ? ::strtol(v, NULL, 0) : Default;
-    if (Knob_ReportSettings && v != NULL) {
-        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
-        ::fflush(stdout);
-    }
-    return rslt;
+  char * v = kvGet(kvList, Key);
+  int rslt = v ? ::strtol(v, NULL, 0) : Default;
+  if (Knob_ReportSettings && v != NULL) {
+    ::printf("  SyncKnob: %s %d(%d)\n", Key, rslt, Default);
+    ::fflush(stdout);
+  }
+  return rslt;
 }
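
The knob list these helpers walk is a ':'-separated string of "Key=Value" tokens that DeferredInitialize() below converts into a doubly-NUL-terminated buffer. A self-contained sketch of the same lookup, with hypothetical names rather than the HotSpot sources:

#include <cstdio>
#include <cstdlib>
#include <cstring>

static char* kv_get(char* kv_list, const char* key) {
  if (kv_list == NULL) return NULL;
  size_t n = strlen(key);
  for (char* s = kv_list; *s; s += strlen(s) + 1) {   // stop at the empty token
    if (strncmp(s, key, n) == 0) {
      if (s[n] == '=') return s + n + 1;               // "Key=Value" -> "Value"
      if (s[n] == 0)   return (char*)"1";              // bare "Key" means "1"
    }
  }
  return NULL;
}

int main() {
  const char* knobs_src = "SpinLimit=4096:Verbose";
  size_t sz = strlen(knobs_src);
  char* knobs = (char*)malloc(sz + 2);                 // room for the extra NUL
  strcpy(knobs, knobs_src);
  knobs[sz + 1] = 0;                                   // double terminator
  for (char* p = knobs; *p; p++) {
    if (*p == ':') *p = 0;                             // split on ':'
  }
  printf("SpinLimit=%s Verbose=%s\n", kv_get(knobs, "SpinLimit"), kv_get(knobs, "Verbose"));
  free(knobs);
  return 0;
}

Running this prints "SpinLimit=4096 Verbose=1", matching the "bare key means 1" convention in kvGet() above.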
 
 void ObjectMonitor::DeferredInitialize() {
   if (InitDone > 0) return;
   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
-      while (InitDone != 1);
-      return;
+    while (InitDone != 1);
+    return;
   }
 
   // One-shot global initialization ...
@@ -2457,13 +2457,13 @@
   size_t sz = strlen(SyncKnobs);
   char * knobs = (char *) malloc(sz + 2);
   if (knobs == NULL) {
-     vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
-     guarantee(0, "invariant");
+    vm_exit_out_of_memory(sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs");
+    guarantee(0, "invariant");
   }
   strcpy(knobs, SyncKnobs);
   knobs[sz+1] = 0;
   for (char * p = knobs; *p; p++) {
-     if (*p == ':') *p = 0;
+    if (*p == ':') *p = 0;
   }
 
   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
@@ -2502,18 +2502,18 @@
   }
 
   if (os::is_MP()) {
-     BackOffMask = (1 << Knob_SpinBackOff) - 1;
-     if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
-     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+    BackOffMask = (1 << Knob_SpinBackOff) - 1;
+    if (Knob_ReportSettings) ::printf("BackOffMask=%X\n", BackOffMask);
+    // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
   } else {
-     Knob_SpinLimit = 0;
-     Knob_SpinBase  = 0;
-     Knob_PreSpin   = 0;
-     Knob_FixedSpin = -1;
+    Knob_SpinLimit = 0;
+    Knob_SpinBase  = 0;
+    Knob_PreSpin   = 0;
+    Knob_FixedSpin = -1;
   }
 
   if (Knob_LogSpins == 0) {
-     ObjectMonitor::_sync_FailedSpins = NULL;
+    ObjectMonitor::_sync_FailedSpins = NULL;
   }
 
   free(knobs);
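
The InitDone protocol above is the classic CAS-guarded one-shot initialization: the first caller wins a compare-and-swap and does the work while racing callers spin until completion is published. A minimal sketch with C++11 atomics; the names are hypothetical, and the completing store is shown explicitly here even though it is not part of the excerpt above.

#include <atomic>

static std::atomic<int> init_done{0};   // 0 = not started, -1 = in progress, 1 = done

void deferred_initialize() {
  if (init_done.load(std::memory_order_acquire) > 0) return;
  int expected = 0;
  if (!init_done.compare_exchange_strong(expected, -1,
                                         std::memory_order_acq_rel)) {
    // Another thread is (or was) initializing -- wait for it to finish.
    while (init_done.load(std::memory_order_acquire) != 1) { /* spin */ }
    return;
  }
  // ... one-shot global initialization goes here ...
  init_done.store(1, std::memory_order_release);       // publish completion
}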
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -141,7 +141,7 @@
     _header       = NULL;
     _count        = 0;
     _waiters      = 0,
-    _recursions   = 0;
+      _recursions   = 0;
     _object       = NULL;
     _owner        = NULL;
     _WaitSet      = NULL;
@@ -158,12 +158,12 @@
   }
 
   ~ObjectMonitor() {
-   // TODO: Add asserts ...
-   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
-   // _count == 0 _EntryList  == NULL etc
+    // TODO: Add asserts ...
+    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 _EntryList  == NULL etc
   }
 
-private:
+ private:
   void Recycle() {
     // TODO: add stronger asserts ...
     // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
@@ -180,7 +180,7 @@
     OwnerIsThread  = 0;
   }
 
-public:
+ public:
 
   void*     object() const;
   void*     object_addr();
@@ -225,9 +225,9 @@
   void      ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
   bool      ExitSuspendEquivalent(JavaThread * Self);
   void      post_monitor_wait_event(EventJavaMonitorWait * event,
-                                                   jlong notifier_tid,
-                                                   jlong timeout,
-                                                   bool timedout);
+                                    jlong notifier_tid,
+                                    jlong timeout,
+                                    bool timedout);
 
  private:
   friend class ObjectSynchronizer;
@@ -253,8 +253,8 @@
  private:
   int OwnerIsThread;               // _owner is (Thread *) vs SP/BasicLock
   ObjectWaiter * volatile _cxq;    // LL of recently-arrived threads blocked on entry.
-                                    // The list is actually composed of WaitNodes, acting
-                                    // as proxies for Threads.
+  // The list is actually composed of WaitNodes, acting
+  // as proxies for Threads.
  protected:
   ObjectWaiter * volatile _EntryList;     // Threads blocked on entry or reentry.
  private:
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -50,8 +50,8 @@
 
  private:
   static methodHandle resolve_sub_helper(JavaThread *thread,
-                                     bool is_virtual,
-                                     bool is_optimized, TRAPS);
+                                         bool is_virtual,
+                                         bool is_optimized, TRAPS);
 
   // Shared stub locations
 
@@ -309,11 +309,11 @@
                                      bool is_virtual,
                                      bool is_optimized, TRAPS);
 
-  private:
+ private:
   // deopt blob
   static void generate_deopt_blob(void);
 
-  public:
+ public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
 
   // Resets a call-site in compiled code so it will get resolved again.
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -43,7 +43,7 @@
 #include "utilities/preserveException.hpp"
 
 #if defined(__GNUC__) && !defined(PPC64)
-  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+// Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define NOINLINE __attribute__((noinline))
 #else
   #define NOINLINE
@@ -128,7 +128,7 @@
 // extremely sensitive to race condition. Be careful.
 
 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
- if (UseBiasedLocking) {
+  if (UseBiasedLocking) {
     if (!SafepointSynchronize::is_at_safepoint()) {
       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
@@ -139,9 +139,9 @@
       BiasedLocking::revoke_at_safepoint(obj);
     }
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
- }
+  }
 
- slow_enter(obj, lock, THREAD);
+  slow_enter(obj, lock, THREAD);
 }
 
 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
@@ -150,19 +150,19 @@
   markOop dhw = lock->displaced_header();
   markOop mark;
   if (dhw == NULL) {
-     // Recursive stack-lock.
-     // Diagnostics -- Could be: stack-locked, inflating, inflated.
-     mark = object->mark();
-     assert(!mark->is_neutral(), "invariant");
-     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
-        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
-     }
-     if (mark->has_monitor()) {
-        ObjectMonitor * m = mark->monitor();
-        assert(((oop)(m->object()))->mark() == mark, "invariant");
-        assert(m->is_entered(THREAD), "invariant");
-     }
-     return;
+    // Recursive stack-lock.
+    // Diagnostics -- Could be: stack-locked, inflating, inflated.
+    mark = object->mark();
+    assert(!mark->is_neutral(), "invariant");
+    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
+    }
+    if (mark->has_monitor()) {
+      ObjectMonitor * m = mark->monitor();
+      assert(((oop)(m->object()))->mark() == mark, "invariant");
+      assert(m->is_entered(THREAD), "invariant");
+    }
+    return;
   }
 
   mark = object->mark();
@@ -170,11 +170,11 @@
   // If the object is stack-locked by the current thread, try to
   // swing the displaced header from the box back to the mark.
   if (mark == (markOop) lock) {
-     assert(dhw->is_neutral(), "invariant");
-     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
-        TEVENT(fast_exit: release stacklock);
-        return;
-     }
+    assert(dhw->is_neutral(), "invariant");
+    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
+      TEVENT(fast_exit: release stacklock);
+      return;
+    }
   }
 
   ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
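
The fast-exit path above hinges on a single CAS: if the object header still points at our on-stack BasicLock, swing the displaced header back in; otherwise fall through to the inflated slow path. A minimal sketch under those assumptions, using hypothetical markword_t/BoxLock types rather than the markOop machinery:

#include <atomic>
#include <cstdint>

typedef std::uintptr_t markword_t;

struct BoxLock { markword_t displaced_header; };

// Returns true when the stack lock was released on the fast path.
bool fast_exit(std::atomic<markword_t>& header, BoxLock* box) {
  markword_t mark = header.load(std::memory_order_relaxed);
  if (mark == reinterpret_cast<markword_t>(box)) {       // stack-locked by us
    markword_t expected = mark;
    return header.compare_exchange_strong(expected, box->displaced_header,
                                          std::memory_order_release,
                                          std::memory_order_relaxed);
  }
  return false;                                          // inflated or contended -- slow path
}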
@@ -299,7 +299,7 @@
   // If this thread has locked the object, exit the monitor.  Note:  can't use
   // monitor->check(CHECK); must exit even if an exception is pending.
   if (monitor->check(THREAD)) {
-     monitor->exit(true, THREAD);
+    monitor->exit(true, THREAD);
   }
 }
 
@@ -362,7 +362,7 @@
 }
 
 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
- if (UseBiasedLocking) {
+  if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
@@ -410,16 +410,16 @@
 // performed by the CPU(s) or platform.
 
 struct SharedGlobals {
-    // These are highly shared mostly-read variables.
-    // To avoid false-sharing they need to be the sole occupants of a $ line.
-    double padPrefix[8];
-    volatile int stwRandom;
-    volatile int stwCycle;
+  // These are highly shared mostly-read variables.
+  // To avoid false-sharing they need to be the sole occupants of a $ line.
+  double padPrefix[8];
+  volatile int stwRandom;
+  volatile int stwCycle;
 
-    // Hot RW variables -- Sequester to avoid false-sharing
-    double padSuffix[16];
-    volatile int hcSequence;
-    double padFinal[8];
+  // Hot RW variables -- Sequester to avoid false-sharing
+  double padSuffix[16];
+  volatile int hcSequence;
+  double padFinal[8];
 };
 
 static SharedGlobals GVars;
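
The padPrefix/padSuffix arrays above exist purely to keep these fields from sharing cache lines with unrelated hot data. A sketch of the same idea, assuming a 64-byte line; the constant and field names are illustrative only:

#include <cstddef>

#define CACHE_LINE_SIZE 64

struct PaddedGlobals {
  char pad_prefix[CACHE_LINE_SIZE];          // keep neighbours off our line
  volatile int stw_random;                   // mostly-read
  volatile int stw_cycle;                    // mostly-read
  char pad_middle[CACHE_LINE_SIZE - 2 * sizeof(int)];
  volatile int hc_sequence;                  // hot read-write counter
  char pad_suffix[CACHE_LINE_SIZE - sizeof(int)];
};

C++11 code could achieve the same separation with alignas(64) on each hot member; the explicit pad arrays mirror the style used here.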
@@ -451,45 +451,45 @@
 
     ++its;
     if (its > 10000 || !os::is_MP()) {
-       if (its & 1) {
-         os::naked_yield();
-         TEVENT(Inflate: INFLATING - yield);
-       } else {
-         // Note that the following code attenuates the livelock problem but is not
-         // a complete remedy.  A more complete solution would require that the inflating
-         // thread hold the associated inflation lock.  The following code simply restricts
-         // the number of spinners to at most one.  We'll have N-2 threads blocked
-         // on the inflationlock, 1 thread holding the inflation lock and using
-         // a yield/park strategy, and 1 thread in the midst of inflation.
-         // A more refined approach would be to change the encoding of INFLATING
-         // to allow encapsulation of a native thread pointer.  Threads waiting for
-         // inflation to complete would use CAS to push themselves onto a singly linked
-         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
-         // and calling park().  When inflation was complete the thread that accomplished inflation
-         // would detach the list and set the markword to inflated with a single CAS and
-         // then for each thread on the list, set the flag and unpark() the thread.
-         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
-         // wakes at most one thread whereas we need to wake the entire list.
-         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
-         int YieldThenBlock = 0;
-         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
-         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
-         Thread::muxAcquire(InflationLocks + ix, "InflationLock");
-         while (obj->mark() == markOopDesc::INFLATING()) {
-           // Beware: NakedYield() is advisory and has almost no effect on some platforms
-           // so we periodically call Self->_ParkEvent->park(1).
-           // We use a mixed spin/yield/block mechanism.
-           if ((YieldThenBlock++) >= 16) {
-              Thread::current()->_ParkEvent->park(1);
-           } else {
-              os::naked_yield();
-           }
-         }
-         Thread::muxRelease(InflationLocks + ix);
-         TEVENT(Inflate: INFLATING - yield/park);
-       }
+      if (its & 1) {
+        os::naked_yield();
+        TEVENT(Inflate: INFLATING - yield);
+      } else {
+        // Note that the following code attenuates the livelock problem but is not
+        // a complete remedy.  A more complete solution would require that the inflating
+        // thread hold the associated inflation lock.  The following code simply restricts
+        // the number of spinners to at most one.  We'll have N-2 threads blocked
+        // on the inflationlock, 1 thread holding the inflation lock and using
+        // a yield/park strategy, and 1 thread in the midst of inflation.
+        // A more refined approach would be to change the encoding of INFLATING
+        // to allow encapsulation of a native thread pointer.  Threads waiting for
+        // inflation to complete would use CAS to push themselves onto a singly linked
+        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
+        // and calling park().  When inflation was complete the thread that accomplished inflation
+        // would detach the list and set the markword to inflated with a single CAS and
+        // then for each thread on the list, set the flag and unpark() the thread.
+        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+        // wakes at most one thread whereas we need to wake the entire list.
+        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
+        int YieldThenBlock = 0;
+        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
+        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
+        Thread::muxAcquire(InflationLocks + ix, "InflationLock");
+        while (obj->mark() == markOopDesc::INFLATING()) {
+          // Beware: NakedYield() is advisory and has almost no effect on some platforms
+          // so we periodically call Self->_ParkEvent->park(1).
+          // We use a mixed spin/yield/block mechanism.
+          if ((YieldThenBlock++) >= 16) {
+            Thread::current()->_ParkEvent->park(1);
+          } else {
+            os::naked_yield();
+          }
+        }
+        Thread::muxRelease(InflationLocks + ix);
+        TEVENT(Inflate: INFLATING - yield/park);
+      }
     } else {
-       SpinPause();       // SMP-polite spinning
+      SpinPause();       // SMP-polite spinning
     }
   }
 }
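
Two ideas in the loop above are worth isolating: the object address is hashed into a small power-of-two table of inflation locks so at most one waiter per bucket spins, and waiters back off from yielding to brief sleeps. A sketch of both, mirroring the arithmetic above but with an illustrative bucket count:

#include <chrono>
#include <cstdint>
#include <thread>

#define NUM_STRIPE_LOCKS 256   // power of two, stands in for NINFLATIONLOCKS

// Hash an object address down to a lock bucket, dropping low alignment bits.
static int stripe_index(const void* obj) {
  std::uintptr_t bits = reinterpret_cast<std::uintptr_t>(obj);
  return (int)((bits >> 5) & (NUM_STRIPE_LOCKS - 1));
}

// Mixed yield/park back-off: yield a few times, then sleep briefly so the
// thread doing the real work (inflation) gets cycles to finish.
template <typename Pred>
static void wait_until(Pred done) {
  int yield_then_block = 0;
  while (!done()) {
    if ((yield_then_block++) >= 16) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));  // ~ park(1)
    } else {
      std::this_thread::yield();
    }
  }
}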
@@ -515,40 +515,40 @@
 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
   intptr_t value = 0;
   if (hashCode == 0) {
-     // This form uses an unguarded global Park-Miller RNG,
-     // so it's possible for two threads to race and generate the same RNG.
-     // On MP system we'll have lots of RW access to a global, so the
-     // mechanism induces lots of coherency traffic.
-     value = os::random();
+    // This form uses an unguarded global Park-Miller RNG,
+    // so it's possible for two threads to race and generate the same RNG value.
+    // On MP systems we'll have lots of RW access to a global, so the
+    // mechanism induces lots of coherency traffic.
+    value = os::random();
   } else
   if (hashCode == 1) {
-     // This variation has the property of being stable (idempotent)
-     // between STW operations.  This can be useful in some of the 1-0
-     // synchronization schemes.
-     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
-     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
+    // This variation has the property of being stable (idempotent)
+    // between STW operations.  This can be useful in some of the 1-0
+    // synchronization schemes.
+    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
+    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
   } else
   if (hashCode == 2) {
-     value = 1;            // for sensitivity testing
+    value = 1;            // for sensitivity testing
   } else
   if (hashCode == 3) {
-     value = ++GVars.hcSequence;
+    value = ++GVars.hcSequence;
   } else
   if (hashCode == 4) {
-     value = cast_from_oop<intptr_t>(obj);
+    value = cast_from_oop<intptr_t>(obj);
   } else {
-     // Marsaglia's xor-shift scheme with thread-specific state
-     // This is probably the best overall implementation -- we'll
-     // likely make this the default in future releases.
-     unsigned t = Self->_hashStateX;
-     t ^= (t << 11);
-     Self->_hashStateX = Self->_hashStateY;
-     Self->_hashStateY = Self->_hashStateZ;
-     Self->_hashStateZ = Self->_hashStateW;
-     unsigned v = Self->_hashStateW;
-     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
-     Self->_hashStateW = v;
-     value = v;
+    // Marsaglia's xor-shift scheme with thread-specific state
+    // This is probably the best overall implementation -- we'll
+    // likely make this the default in future releases.
+    unsigned t = Self->_hashStateX;
+    t ^= (t << 11);
+    Self->_hashStateX = Self->_hashStateY;
+    Self->_hashStateY = Self->_hashStateZ;
+    Self->_hashStateZ = Self->_hashStateW;
+    unsigned v = Self->_hashStateW;
+    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
+    Self->_hashStateW = v;
+    value = v;
   }
 
   value &= markOopDesc::hash_mask;
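
The default branch above is Marsaglia's xor-shift generator with four words of per-thread state. A standalone sketch of the same recurrence; the seed values here are arbitrary, whereas the real state lives in the Thread object:

#include <cstdio>

struct HashState {
  unsigned x, y, z, w;
};

static unsigned next_hash(HashState* s) {
  unsigned t = s->x;
  t ^= (t << 11);
  s->x = s->y;
  s->y = s->z;
  s->z = s->w;
  unsigned v = s->w;
  v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  s->w = v;
  return v;
}

int main() {
  HashState s = {123456789u, 362436069u, 521288629u, 88675123u};
  for (int i = 0; i < 4; i++) {
    printf("%u\n", next_hash(&s));
  }
  return 0;
}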
@@ -572,7 +572,7 @@
       Handle hobj(Self, obj);
       // Relaxing assertion for bug 6320749.
       assert(Universe::verify_in_progress() ||
-              !SafepointSynchronize::is_at_safepoint(),
+             !SafepointSynchronize::is_at_safepoint(),
              "biases should not be seen by VM thread here");
       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
       obj = hobj();
@@ -583,9 +583,9 @@
   // hashCode() is a heap mutator ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
-          !SafepointSynchronize::is_at_safepoint(), "invariant");
+         !SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(Universe::verify_in_progress() ||
-          Self->is_Java_thread() , "invariant");
+         Self->is_Java_thread(), "invariant");
   assert(Universe::verify_in_progress() ||
          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 
@@ -887,143 +887,143 @@
 }
 
 void ObjectSynchronizer::verifyInUse (Thread *Self) {
-   ObjectMonitor* mid;
-   int inusetally = 0;
-   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
-     inusetally++;
-   }
-   assert(inusetally == Self->omInUseCount, "inuse count off");
+  ObjectMonitor* mid;
+  int inusetally = 0;
+  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
+    inusetally++;
+  }
+  assert(inusetally == Self->omInUseCount, "inuse count off");
 
-   int freetally = 0;
-   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
-     freetally++;
-   }
-   assert(freetally == Self->omFreeCount, "free count off");
+  int freetally = 0;
+  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
+    freetally++;
+  }
+  assert(freetally == Self->omFreeCount, "free count off");
 }
 
 ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc (Thread * Self) {
-    // A large MAXPRIVATE value reduces both list lock contention
-    // and list coherency traffic, but also tends to increase the
-    // number of objectMonitors in circulation as well as the STW
-    // scavenge costs.  As usual, we lean toward time in space-time
-    // tradeoffs.
-    const int MAXPRIVATE = 1024;
-    for (;;) {
-        ObjectMonitor * m;
+  // A large MAXPRIVATE value reduces both list lock contention
+  // and list coherency traffic, but also tends to increase the
+  // number of objectMonitors in circulation as well as the STW
+  // scavenge costs.  As usual, we lean toward time in space-time
+  // tradeoffs.
+  const int MAXPRIVATE = 1024;
+  for (;;) {
+    ObjectMonitor * m;
 
-        // 1: try to allocate from the thread's local omFreeList.
-        // Threads will attempt to allocate first from their local list, then
-        // from the global list, and only after those attempts fail will the thread
-        // attempt to instantiate new monitors.   Thread-local free lists take
-        // heat off the ListLock and improve allocation latency, as well as reducing
-        // coherency traffic on the shared global list.
-        m = Self->omFreeList;
-        if (m != NULL) {
-           Self->omFreeList = m->FreeNext;
-           Self->omFreeCount--;
-           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
-           guarantee(m->object() == NULL, "invariant");
-           if (MonitorInUseLists) {
-             m->FreeNext = Self->omInUseList;
-             Self->omInUseList = m;
-             Self->omInUseCount++;
-             if (ObjectMonitor::Knob_VerifyInUse) {
-               verifyInUse(Self);
-             }
-           } else {
-             m->FreeNext = NULL;
-           }
-           return m;
+    // 1: try to allocate from the thread's local omFreeList.
+    // Threads will attempt to allocate first from their local list, then
+    // from the global list, and only after those attempts fail will the thread
+    // attempt to instantiate new monitors.   Thread-local free lists take
+    // heat off the ListLock and improve allocation latency, as well as reducing
+    // coherency traffic on the shared global list.
+    m = Self->omFreeList;
+    if (m != NULL) {
+      Self->omFreeList = m->FreeNext;
+      Self->omFreeCount--;
+      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
+      guarantee(m->object() == NULL, "invariant");
+      if (MonitorInUseLists) {
+        m->FreeNext = Self->omInUseList;
+        Self->omInUseList = m;
+        Self->omInUseCount++;
+        if (ObjectMonitor::Knob_VerifyInUse) {
+          verifyInUse(Self);
         }
+      } else {
+        m->FreeNext = NULL;
+      }
+      return m;
+    }
 
-        // 2: try to allocate from the global gFreeList
-        // CONSIDER: use muxTry() instead of muxAcquire().
-        // If the muxTry() fails then drop immediately into case 3.
-        // If we're using thread-local free lists then try
-        // to reprovision the caller's free list.
-        if (gFreeList != NULL) {
-            // Reprovision the thread's omFreeList.
-            // Use bulk transfers to reduce the allocation rate and heat
-            // on various locks.
-            Thread::muxAcquire(&ListLock, "omAlloc");
-            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
-                MonitorFreeCount--;
-                ObjectMonitor * take = gFreeList;
-                gFreeList = take->FreeNext;
-                guarantee(take->object() == NULL, "invariant");
-                guarantee(!take->is_busy(), "invariant");
-                take->Recycle();
-                omRelease(Self, take, false);
-            }
-            Thread::muxRelease(&ListLock);
-            Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
-            if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
-            TEVENT(omFirst - reprovision);
+    // 2: try to allocate from the global gFreeList
+    // CONSIDER: use muxTry() instead of muxAcquire().
+    // If the muxTry() fails then drop immediately into case 3.
+    // If we're using thread-local free lists then try
+    // to reprovision the caller's free list.
+    if (gFreeList != NULL) {
+      // Reprovision the thread's omFreeList.
+      // Use bulk transfers to reduce the allocation rate and heat
+      // on various locks.
+      Thread::muxAcquire(&ListLock, "omAlloc");
+      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
+        MonitorFreeCount--;
+        ObjectMonitor * take = gFreeList;
+        gFreeList = take->FreeNext;
+        guarantee(take->object() == NULL, "invariant");
+        guarantee(!take->is_busy(), "invariant");
+        take->Recycle();
+        omRelease(Self, take, false);
+      }
+      Thread::muxRelease(&ListLock);
+      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
+      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
+      TEVENT(omFirst - reprovision);
 
-            const int mx = MonitorBound;
-            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
-              // We can't safely induce a STW safepoint from omAlloc() as our thread
-              // state may not be appropriate for such activities and callers may hold
-              // naked oops, so instead we defer the action.
-              InduceScavenge(Self, "omAlloc");
-            }
-            continue;
-        }
+      const int mx = MonitorBound;
+      if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
+        // We can't safely induce a STW safepoint from omAlloc() as our thread
+        // state may not be appropriate for such activities and callers may hold
+        // naked oops, so instead we defer the action.
+        InduceScavenge(Self, "omAlloc");
+      }
+      continue;
+    }
 
-        // 3: allocate a block of new ObjectMonitors
-        // Both the local and global free lists are empty -- resort to malloc().
-        // In the current implementation objectMonitors are TSM - immortal.
-        assert(_BLOCKSIZE > 1, "invariant");
-        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
+    // 3: allocate a block of new ObjectMonitors
+    // Both the local and global free lists are empty -- resort to malloc().
+    // In the current implementation objectMonitors are TSM - immortal.
+    assert(_BLOCKSIZE > 1, "invariant");
+    ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
 
-        // NOTE: (almost) no way to recover if allocation failed.
-        // We might be able to induce a STW safepoint and scavenge enough
-        // objectMonitors to permit progress.
-        if (temp == NULL) {
-            vm_exit_out_of_memory(sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
-                                   "Allocate ObjectMonitors");
-        }
+    // NOTE: (almost) no way to recover if allocation failed.
+    // We might be able to induce a STW safepoint and scavenge enough
+    // objectMonitors to permit progress.
+    if (temp == NULL) {
+      vm_exit_out_of_memory(sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
+                            "Allocate ObjectMonitors");
+    }
 
-        // Format the block.
-        // initialize the linked list, each monitor points to its next
-        // forming the single linked free list, the very first monitor
-        // will points to next block, which forms the block list.
-        // The trick of using the 1st element in the block as gBlockList
-        // linkage should be reconsidered.  A better implementation would
-        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
+    // Format the block.
+    // Initialize the linked list; each monitor points to its next,
+    // forming the singly linked free list.  The very first monitor
+    // points to the next block, which forms the block list.
+    // The trick of using the 1st element in the block as gBlockList
+    // linkage should be reconsidered.  A better implementation would
+    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
 
-        for (int i = 1; i < _BLOCKSIZE; i++) {
-           temp[i].FreeNext = &temp[i+1];
-        }
+    for (int i = 1; i < _BLOCKSIZE; i++) {
+      temp[i].FreeNext = &temp[i+1];
+    }
 
-        // terminate the last monitor as the end of list
-        temp[_BLOCKSIZE - 1].FreeNext = NULL;
+    // terminate the last monitor as the end of list
+    temp[_BLOCKSIZE - 1].FreeNext = NULL;
 
-        // Element [0] is reserved for global list linkage
-        temp[0].set_object(CHAINMARKER);
+    // Element [0] is reserved for global list linkage
+    temp[0].set_object(CHAINMARKER);
 
-        // Consider carving out this thread's current request from the
-        // block in hand.  This avoids some lock traffic and redundant
-        // list activity.
+    // Consider carving out this thread's current request from the
+    // block in hand.  This avoids some lock traffic and redundant
+    // list activity.
 
-        // Acquire the ListLock to manipulate BlockList and FreeList.
-        // An Oyama-Taura-Yonezawa scheme might be more efficient.
-        Thread::muxAcquire(&ListLock, "omAlloc [2]");
-        MonitorPopulation += _BLOCKSIZE-1;
-        MonitorFreeCount += _BLOCKSIZE-1;
+    // Acquire the ListLock to manipulate BlockList and FreeList.
+    // An Oyama-Taura-Yonezawa scheme might be more efficient.
+    Thread::muxAcquire(&ListLock, "omAlloc [2]");
+    MonitorPopulation += _BLOCKSIZE-1;
+    MonitorFreeCount += _BLOCKSIZE-1;
 
-        // Add the new block to the list of extant blocks (gBlockList).
-        // The very first objectMonitor in a block is reserved and dedicated.
-        // It serves as blocklist "next" linkage.
-        temp[0].FreeNext = gBlockList;
-        gBlockList = temp;
+    // Add the new block to the list of extant blocks (gBlockList).
+    // The very first objectMonitor in a block is reserved and dedicated.
+    // It serves as blocklist "next" linkage.
+    temp[0].FreeNext = gBlockList;
+    gBlockList = temp;
 
-        // Add the new string of objectMonitors to the global free list
-        temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
-        gFreeList = temp + 1;
-        Thread::muxRelease(&ListLock);
-        TEVENT(Allocate block of monitors);
-    }
+    // Add the new string of objectMonitors to the global free list
+    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
+    gFreeList = temp + 1;
+    Thread::muxRelease(&ListLock);
+    TEVENT(Allocate block of monitors);
+  }
 }
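
Stripped of the monitoring and scavenge hooks, omAlloc() is a three-tier allocator: thread-local free list first, bulk refill from the global list under ListLock, and a freshly allocated block only when both are empty. A greatly simplified sketch with hypothetical Node/ThreadCache types; a std::mutex stands in for the mux lock, and block retirement and the in-use list are omitted:

#include <cstddef>
#include <mutex>

struct Node { Node* free_next; };

static std::mutex g_list_lock;
static Node*      g_free_list = nullptr;

struct ThreadCache {
  Node* free_list = nullptr;
  int   provision = 32;          // how many nodes to grab per global refill

  Node* allocate() {
    for (;;) {
      // 1: thread-local free list -- no locking, no coherency traffic.
      if (Node* m = free_list) {
        free_list = m->free_next;
        return m;
      }
      // 2: bulk refill from the global free list under the list lock.
      {
        std::lock_guard<std::mutex> guard(g_list_lock);
        for (int i = provision; --i >= 0 && g_free_list != nullptr;) {
          Node* take = g_free_list;
          g_free_list = take->free_next;
          take->free_next = free_list;
          free_list = take;
        }
      }
      if (free_list != nullptr) continue;   // retry step 1
      // 3: both lists empty -- carve a new block and thread it onto the
      // local list (blocks are never retired, mirroring the "immortal" note).
      const int BLOCK = 128;
      Node* block = new Node[BLOCK];
      for (int i = 0; i < BLOCK - 1; i++) block[i].free_next = &block[i + 1];
      block[BLOCK - 1].free_next = nullptr;
      free_list = block;
    }
  }
};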
 
 // Place "m" on the caller's private per-thread omFreeList.
@@ -1035,27 +1035,27 @@
 //
 
 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
-    guarantee(m->object() == NULL, "invariant");
+  guarantee(m->object() == NULL, "invariant");
 
-    // Remove from omInUseList
-    if (MonitorInUseLists && fromPerThreadAlloc) {
-      ObjectMonitor* curmidinuse = NULL;
-      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
-       if (m == mid) {
-         // extract from per-thread in-use-list
-         if (mid == Self->omInUseList) {
-           Self->omInUseList = mid->FreeNext;
-         } else if (curmidinuse != NULL) {
-           curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
-         }
-         Self->omInUseCount--;
-         if (ObjectMonitor::Knob_VerifyInUse) {
-           verifyInUse(Self);
-         }
-         break;
-       } else {
-         curmidinuse = mid;
-         mid = mid->FreeNext;
+  // Remove from omInUseList
+  if (MonitorInUseLists && fromPerThreadAlloc) {
+    ObjectMonitor* curmidinuse = NULL;
+    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
+      if (m == mid) {
+        // extract from per-thread in-use-list
+        if (mid == Self->omInUseList) {
+          Self->omInUseList = mid->FreeNext;
+        } else if (curmidinuse != NULL) {
+          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+        }
+        Self->omInUseCount--;
+        if (ObjectMonitor::Knob_VerifyInUse) {
+          verifyInUse(Self);
+        }
+        break;
+      } else {
+        curmidinuse = mid;
+        mid = mid->FreeNext;
       }
     }
   }
@@ -1087,53 +1087,53 @@
 // operator.
 
 void ObjectSynchronizer::omFlush (Thread * Self) {
-    ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
-    Self->omFreeList = NULL;
-    ObjectMonitor * Tail = NULL;
-    int Tally = 0;
-    if (List != NULL) {
-      ObjectMonitor * s;
-      for (s = List; s != NULL; s = s->FreeNext) {
-          Tally++;
-          Tail = s;
-          guarantee(s->object() == NULL, "invariant");
-          guarantee(!s->is_busy(), "invariant");
-          s->set_owner(NULL);   // redundant but good hygiene
-          TEVENT(omFlush - Move one);
-      }
-      guarantee(Tail != NULL && List != NULL, "invariant");
+  ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
+  Self->omFreeList = NULL;
+  ObjectMonitor * Tail = NULL;
+  int Tally = 0;
+  if (List != NULL) {
+    ObjectMonitor * s;
+    for (s = List; s != NULL; s = s->FreeNext) {
+      Tally++;
+      Tail = s;
+      guarantee(s->object() == NULL, "invariant");
+      guarantee(!s->is_busy(), "invariant");
+      s->set_owner(NULL);   // redundant but good hygiene
+      TEVENT(omFlush - Move one);
     }
+    guarantee(Tail != NULL && List != NULL, "invariant");
+  }
 
-    ObjectMonitor * InUseList = Self->omInUseList;
-    ObjectMonitor * InUseTail = NULL;
-    int InUseTally = 0;
-    if (InUseList != NULL) {
-      Self->omInUseList = NULL;
-      ObjectMonitor *curom;
-      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
-        InUseTail = curom;
-        InUseTally++;
-      }
-      assert(Self->omInUseCount == InUseTally, "inuse count off");
-      Self->omInUseCount = 0;
-      guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
+  ObjectMonitor * InUseList = Self->omInUseList;
+  ObjectMonitor * InUseTail = NULL;
+  int InUseTally = 0;
+  if (InUseList != NULL) {
+    Self->omInUseList = NULL;
+    ObjectMonitor *curom;
+    for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
+      InUseTail = curom;
+      InUseTally++;
     }
+    assert(Self->omInUseCount == InUseTally, "inuse count off");
+    Self->omInUseCount = 0;
+    guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
+  }
 
-    Thread::muxAcquire(&ListLock, "omFlush");
-    if (Tail != NULL) {
-      Tail->FreeNext = gFreeList;
-      gFreeList = List;
-      MonitorFreeCount += Tally;
-    }
+  Thread::muxAcquire(&ListLock, "omFlush");
+  if (Tail != NULL) {
+    Tail->FreeNext = gFreeList;
+    gFreeList = List;
+    MonitorFreeCount += Tally;
+  }
 
-    if (InUseTail != NULL) {
-      InUseTail->FreeNext = gOmInUseList;
-      gOmInUseList = InUseList;
-      gOmInUseCount += InUseTally;
-    }
+  if (InUseTail != NULL) {
+    InUseTail->FreeNext = gOmInUseList;
+    gOmInUseList = InUseList;
+    gOmInUseCount += InUseTally;
+  }
 
-    Thread::muxRelease(&ListLock);
-    TEVENT(omFlush);
+  Thread::muxRelease(&ListLock);
+  TEVENT(omFlush);
 }
 
 // Fast path code shared by multiple functions
@@ -1156,189 +1156,189 @@
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
-          !SafepointSynchronize::is_at_safepoint(), "invariant");
+         !SafepointSynchronize::is_at_safepoint(), "invariant");
 
   for (;;) {
-      const markOop mark = object->mark();
-      assert(!mark->has_bias_pattern(), "invariant");
+    const markOop mark = object->mark();
+    assert(!mark->has_bias_pattern(), "invariant");
+
+    // The mark can be in one of the following states:
+    // *  Inflated     - just return
+    // *  Stack-locked - coerce it to inflated
+    // *  INFLATING    - busy wait for conversion to complete
+    // *  Neutral      - aggressively inflate the object.
+    // *  BIASED       - Illegal.  We should never see this
 
-      // The mark can be in one of the following states:
-      // *  Inflated     - just return
-      // *  Stack-locked - coerce it to inflated
-      // *  INFLATING    - busy wait for conversion to complete
-      // *  Neutral      - aggressively inflate the object.
-      // *  BIASED       - Illegal.  We should never see this
+    // CASE: inflated
+    if (mark->has_monitor()) {
+      ObjectMonitor * inf = mark->monitor();
+      assert(inf->header()->is_neutral(), "invariant");
+      assert(inf->object() == object, "invariant");
+      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
+      return inf;
+    }
+
+    // CASE: inflation in progress - inflating over a stack-lock.
+    // Some other thread is converting from stack-locked to inflated.
+    // Only that thread can complete inflation -- other threads must wait.
+    // The INFLATING value is transient.
+    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
+    // We could always eliminate polling by parking the thread on some auxiliary list.
+    if (mark == markOopDesc::INFLATING()) {
+      TEVENT(Inflate: spin while INFLATING);
+      ReadStableMark(object);
+      continue;
+    }
 
-      // CASE: inflated
-      if (mark->has_monitor()) {
-          ObjectMonitor * inf = mark->monitor();
-          assert(inf->header()->is_neutral(), "invariant");
-          assert(inf->object() == object, "invariant");
-          assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
-          return inf;
-      }
+    // CASE: stack-locked
+    // Could be stack-locked either by this thread or by some other thread.
+    //
+    // Note that we allocate the objectmonitor speculatively, _before_ attempting
+    // to install INFLATING into the mark word.  We originally installed INFLATING,
+    // allocated the objectmonitor, and then finally STed the address of the
+    // objectmonitor into the mark.  This was correct, but artificially lengthened
+    // the interval in which INFLATING appeared in the mark, thus increasing
+    // the odds of inflation contention.
+    //
+    // We now use per-thread private objectmonitor free lists.
+    // These lists are reprovisioned from the global free list outside the
+    // critical INFLATING...ST interval.  A thread can transfer
+    // multiple objectmonitors en masse from the global free list to its local free list.
+    // This reduces coherency traffic and lock contention on the global free list.
+    // Using such local free lists, it doesn't matter if the omAlloc() call appears
+    // before or after the CAS(INFLATING) operation.
+    // See the comments in omAlloc().
 
-      // CASE: inflation in progress - inflating over a stack-lock.
-      // Some other thread is converting from stack-locked to inflated.
-      // Only that thread can complete inflation -- other threads must wait.
-      // The INFLATING value is transient.
-      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
-      // We could always eliminate polling by parking the thread on some auxiliary list.
-      if (mark == markOopDesc::INFLATING()) {
-         TEVENT(Inflate: spin while INFLATING);
-         ReadStableMark(object);
-         continue;
+    if (mark->has_locker()) {
+      ObjectMonitor * m = omAlloc(Self);
+      // Optimistically prepare the objectmonitor - anticipate successful CAS
+      // We do this before the CAS in order to minimize the length of time
+      // in which INFLATING appears in the mark.
+      m->Recycle();
+      m->_Responsible  = NULL;
+      m->OwnerIsThread = 0;
+      m->_recursions   = 0;
+      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
+
+      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
+      if (cmp != mark) {
+        omRelease(Self, m, true);
+        continue;       // Interference -- just retry
       }
 
-      // CASE: stack-locked
-      // Could be stack-locked either by this thread or by some other thread.
-      //
-      // Note that we allocate the objectmonitor speculatively, _before_ attempting
-      // to install INFLATING into the mark word.  We originally installed INFLATING,
-      // allocated the objectmonitor, and then finally STed the address of the
-      // objectmonitor into the mark.  This was correct, but artificially lengthened
-      // the interval in which INFLATED appeared in the mark, thus increasing
-      // the odds of inflation contention.
+      // We've successfully installed INFLATING (0) into the mark-word.
+      // This is the only case where 0 will appear in a mark-word.
+      // Only the singular thread that successfully swings the mark-word
+      // to 0 can perform (or more precisely, complete) inflation.
       //
-      // We now use per-thread private objectmonitor free lists.
-      // These list are reprovisioned from the global free list outside the
-      // critical INFLATING...ST interval.  A thread can transfer
-      // multiple objectmonitors en-mass from the global free list to its local free list.
-      // This reduces coherency traffic and lock contention on the global free list.
-      // Using such local free lists, it doesn't matter if the omAlloc() call appears
-      // before or after the CAS(INFLATING) operation.
-      // See the comments in omAlloc().
-
-      if (mark->has_locker()) {
-          ObjectMonitor * m = omAlloc(Self);
-          // Optimistically prepare the objectmonitor - anticipate successful CAS
-          // We do this before the CAS in order to minimize the length of time
-          // in which INFLATING appears in the mark.
-          m->Recycle();
-          m->_Responsible  = NULL;
-          m->OwnerIsThread = 0;
-          m->_recursions   = 0;
-          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
-
-          markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
-          if (cmp != mark) {
-             omRelease(Self, m, true);
-             continue;       // Interference -- just retry
-          }
-
-          // We've successfully installed INFLATING (0) into the mark-word.
-          // This is the only case where 0 will appear in a mark-work.
-          // Only the singular thread that successfully swings the mark-word
-          // to 0 can perform (or more precisely, complete) inflation.
-          //
-          // Why do we CAS a 0 into the mark-word instead of just CASing the
-          // mark-word from the stack-locked value directly to the new inflated state?
-          // Consider what happens when a thread unlocks a stack-locked object.
-          // It attempts to use CAS to swing the displaced header value from the
-          // on-stack basiclock back into the object header.  Recall also that the
-          // header value (hashcode, etc) can reside in (a) the object header, or
-          // (b) a displaced header associated with the stack-lock, or (c) a displaced
-          // header in an objectMonitor.  The inflate() routine must copy the header
-          // value from the basiclock on the owner's stack to the objectMonitor, all
-          // the while preserving the hashCode stability invariants.  If the owner
-          // decides to release the lock while the value is 0, the unlock will fail
-          // and control will eventually pass from slow_exit() to inflate.  The owner
-          // will then spin, waiting for the 0 value to disappear.   Put another way,
-          // the 0 causes the owner to stall if the owner happens to try to
-          // drop the lock (restoring the header from the basiclock to the object)
-          // while inflation is in-progress.  This protocol avoids races that might
-          // would otherwise permit hashCode values to change or "flicker" for an object.
-          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
-          // 0 serves as a "BUSY" inflate-in-progress indicator.
+      // Why do we CAS a 0 into the mark-word instead of just CASing the
+      // mark-word from the stack-locked value directly to the new inflated state?
+      // Consider what happens when a thread unlocks a stack-locked object.
+      // It attempts to use CAS to swing the displaced header value from the
+      // on-stack basiclock back into the object header.  Recall also that the
+      // header value (hashcode, etc) can reside in (a) the object header, or
+      // (b) a displaced header associated with the stack-lock, or (c) a displaced
+      // header in an objectMonitor.  The inflate() routine must copy the header
+      // value from the basiclock on the owner's stack to the objectMonitor, all
+      // the while preserving the hashCode stability invariants.  If the owner
+      // decides to release the lock while the value is 0, the unlock will fail
+      // and control will eventually pass from slow_exit() to inflate.  The owner
+      // will then spin, waiting for the 0 value to disappear.   Put another way,
+      // the 0 causes the owner to stall if the owner happens to try to
+      // drop the lock (restoring the header from the basiclock to the object)
+      // while inflation is in-progress.  This protocol avoids races that would
+      // otherwise permit hashCode values to change or "flicker" for an object.
+      // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable.
+      // 0 serves as a "BUSY" inflate-in-progress indicator.
 
 
-          // fetch the displaced mark from the owner's stack.
-          // The owner can't die or unwind past the lock while our INFLATING
-          // object is in the mark.  Furthermore the owner can't complete
-          // an unlock on the object, either.
-          markOop dmw = mark->displaced_mark_helper();
-          assert(dmw->is_neutral(), "invariant");
-
-          // Setup monitor fields to proper values -- prepare the monitor
-          m->set_header(dmw);
-
-          // Optimization: if the mark->locker stack address is associated
-          // with this thread we could simply set m->_owner = Self and
-          // m->OwnerIsThread = 1. Note that a thread can inflate an object
-          // that it has stack-locked -- as might happen in wait() -- directly
-          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
-          m->set_owner(mark->locker());
-          m->set_object(object);
-          // TODO-FIXME: assert BasicLock->dhw != 0.
+      // fetch the displaced mark from the owner's stack.
+      // The owner can't die or unwind past the lock while our INFLATING
+      // object is in the mark.  Furthermore the owner can't complete
+      // an unlock on the object, either.
+      markOop dmw = mark->displaced_mark_helper();
+      assert(dmw->is_neutral(), "invariant");
 
-          // Must preserve store ordering. The monitor state must
-          // be stable at the time of publishing the monitor address.
-          guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
-          object->release_set_mark(markOopDesc::encode(m));
-
-          // Hopefully the performance counters are allocated on distinct cache lines
-          // to avoid false sharing on MP systems ...
-          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
-          TEVENT(Inflate: overwrite stacklock);
-          if (TraceMonitorInflation) {
-            if (object->is_instance()) {
-              ResourceMark rm;
-              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (void *) object, (intptr_t) object->mark(),
-                object->klass()->external_name());
-            }
-          }
-          return m;
-      }
+      // Setup monitor fields to proper values -- prepare the monitor
+      m->set_header(dmw);
 
-      // CASE: neutral
-      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
-      // If we know we're inflating for entry it's better to inflate by swinging a
-      // pre-locked objectMonitor pointer into the object header.   A successful
-      // CAS inflates the object *and* confers ownership to the inflating thread.
-      // In the current implementation we use a 2-step mechanism where we CAS()
-      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
-      // An inflateTry() method that we could call from fast_enter() and slow_enter()
-      // would be useful.
-
-      assert(mark->is_neutral(), "invariant");
-      ObjectMonitor * m = omAlloc(Self);
-      // prepare m for installation - set monitor to initial state
-      m->Recycle();
-      m->set_header(mark);
-      m->set_owner(NULL);
+      // Optimization: if the mark->locker stack address is associated
+      // with this thread we could simply set m->_owner = Self and
+      // m->OwnerIsThread = 1. Note that a thread can inflate an object
+      // that it has stack-locked -- as might happen in wait() -- directly
+      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
+      m->set_owner(mark->locker());
       m->set_object(object);
-      m->OwnerIsThread = 1;
-      m->_recursions   = 0;
-      m->_Responsible  = NULL;
-      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
+      // TODO-FIXME: assert BasicLock->dhw != 0.
 
-      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
-          m->set_object(NULL);
-          m->set_owner(NULL);
-          m->OwnerIsThread = 0;
-          m->Recycle();
-          omRelease(Self, m, true);
-          m = NULL;
-          continue;
-          // interference - the markword changed - just retry.
-          // The state-transitions are one-way, so there's no chance of
-          // live-lock -- "Inflated" is an absorbing state.
-      }
+      // Must preserve store ordering. The monitor state must
+      // be stable at the time of publishing the monitor address.
+      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
+      object->release_set_mark(markOopDesc::encode(m));
 
-      // Hopefully the performance counters are allocated on distinct
-      // cache lines to avoid false sharing on MP systems ...
+      // Hopefully the performance counters are allocated on distinct cache lines
+      // to avoid false sharing on MP systems ...
       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
-      TEVENT(Inflate: overwrite neutral);
+      TEVENT(Inflate: overwrite stacklock);
       if (TraceMonitorInflation) {
         if (object->is_instance()) {
           ResourceMark rm;
           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-            (void *) object, (intptr_t) object->mark(),
-            object->klass()->external_name());
+                        (void *) object, (intptr_t) object->mark(),
+                        object->klass()->external_name());
         }
       }
       return m;
+    }
+
+    // CASE: neutral
+    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
+    // If we know we're inflating for entry it's better to inflate by swinging a
+    // pre-locked objectMonitor pointer into the object header.   A successful
+    // CAS inflates the object *and* confers ownership to the inflating thread.
+    // In the current implementation we use a 2-step mechanism where we CAS()
+    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
+    // An inflateTry() method that we could call from fast_enter() and slow_enter()
+    // would be useful.
+
+    assert(mark->is_neutral(), "invariant");
+    ObjectMonitor * m = omAlloc(Self);
+    // prepare m for installation - set monitor to initial state
+    m->Recycle();
+    m->set_header(mark);
+    m->set_owner(NULL);
+    m->set_object(object);
+    m->OwnerIsThread = 1;
+    m->_recursions   = 0;
+    m->_Responsible  = NULL;
+    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
+
+    if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
+      m->set_object(NULL);
+      m->set_owner(NULL);
+      m->OwnerIsThread = 0;
+      m->Recycle();
+      omRelease(Self, m, true);
+      m = NULL;
+      continue;
+      // interference - the markword changed - just retry.
+      // The state-transitions are one-way, so there's no chance of
+      // live-lock -- "Inflated" is an absorbing state.
+    }
+
+    // Hopefully the performance counters are allocated on distinct
+    // cache lines to avoid false sharing on MP systems ...
+    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
+    TEVENT(Inflate: overwrite neutral);
+    if (TraceMonitorInflation) {
+      if (object->is_instance()) {
+        ResourceMark rm;
+        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+                      (void *) object, (intptr_t) object->mark(),
+                      object->klass()->external_name());
+      }
+    }
+    return m;
   }
 }
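
The core of the stack-locked case above is a two-phase publication: CAS the reserved value 0 (INFLATING) into the mark word, prepare the monitor off to the side, then publish its address with a release store, so readers that observe 0 simply wait and re-read. A sketch of that shape with C++11 atomics and hypothetical types rather than the markOop encoding:

#include <atomic>
#include <cstdint>

static const std::uintptr_t BUSY = 0;   // plays the role of INFLATING()

struct Monitor { std::uintptr_t displaced_header; };

bool inflate_word(std::atomic<std::uintptr_t>& word,
                  std::uintptr_t expected_stack_lock,
                  Monitor* m) {
  // Phase 1: reserve the word.  Only the thread that wins this CAS may
  // complete the transition; everyone else must poll until the word != BUSY.
  std::uintptr_t cmp = expected_stack_lock;
  if (!word.compare_exchange_strong(cmp, BUSY, std::memory_order_acq_rel)) {
    return false;                       // interference -- caller retries
  }
  // Phase 2: the monitor is fully initialized before it becomes visible.
  m->displaced_header = expected_stack_lock;
  word.store(reinterpret_cast<std::uintptr_t>(m), std::memory_order_release);
  return true;
}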
 
@@ -1376,8 +1376,8 @@
 //
 
 enum ManifestConstants {
-    ClearResponsibleAtSTW   = 0,
-    MaximumRecheckInterval  = 1000
+  ClearResponsibleAtSTW   = 0,
+  MaximumRecheckInterval  = 1000
 };
 
 // Deflate a single monitor if not in use
@@ -1391,36 +1391,36 @@
   guarantee(mid->header()->is_neutral(), "invariant");
 
   if (mid->is_busy()) {
-     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
-     deflated = false;
+    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
+    deflated = false;
   } else {
-     // Deflate the monitor if it is no longer being used
-     // It's idle - scavenge and return to the global free list
-     // plain old deflation ...
-     TEVENT(deflate_idle_monitors - scavenge1);
-     if (TraceMonitorInflation) {
-       if (obj->is_instance()) {
-         ResourceMark rm;
-           tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
-       }
-     }
+    // Deflate the monitor if it is no longer being used
+    // It's idle - scavenge and return to the global free list
+    // plain old deflation ...
+    TEVENT(deflate_idle_monitors - scavenge1);
+    if (TraceMonitorInflation) {
+      if (obj->is_instance()) {
+        ResourceMark rm;
+        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
+      }
+    }
 
-     // Restore the header back to obj
-     obj->release_set_mark(mid->header());
-     mid->clear();
+    // Restore the header back to obj
+    obj->release_set_mark(mid->header());
+    mid->clear();
 
-     assert(mid->object() == NULL, "invariant");
+    assert(mid->object() == NULL, "invariant");
 
-     // Move the object to the working free list defined by FreeHead,FreeTail.
-     if (*freeHeadp == NULL) *freeHeadp = mid;
-     if (*freeTailp != NULL) {
-       ObjectMonitor * prevtail = *freeTailp;
-       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
-       prevtail->FreeNext = mid;
-      }
-     *freeTailp = mid;
-     deflated = true;
+    // Move the object to the working free list defined by FreeHead,FreeTail.
+    if (*freeHeadp == NULL) *freeHeadp = mid;
+    if (*freeTailp != NULL) {
+      ObjectMonitor * prevtail = *freeTailp;
+      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
+      prevtail->FreeNext = mid;
+    }
+    *freeTailp = mid;
+    deflated = true;
   }
   return deflated;
 }
@@ -1434,25 +1434,25 @@
   int deflatedcount = 0;
 
   for (mid = *listheadp; mid != NULL;) {
-     oop obj = (oop) mid->object();
-     bool deflated = false;
-     if (obj != NULL) {
-       deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
-     }
-     if (deflated) {
-       // extract from per-thread in-use-list
-       if (mid == *listheadp) {
-         *listheadp = mid->FreeNext;
-       } else if (curmidinuse != NULL) {
-         curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
-       }
-       next = mid->FreeNext;
-       mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
-       mid = next;
-       deflatedcount++;
-     } else {
-       curmidinuse = mid;
-       mid = mid->FreeNext;
+    oop obj = (oop) mid->object();
+    bool deflated = false;
+    if (obj != NULL) {
+      deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
+    }
+    if (deflated) {
+      // extract from per-thread in-use-list
+      if (mid == *listheadp) {
+        *listheadp = mid->FreeNext;
+      } else if (curmidinuse != NULL) {
+        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+      }
+      next = mid->FreeNext;
+      mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
+      mid = next;
+      deflatedcount++;
+    } else {
+      curmidinuse = mid;
+      mid = mid->FreeNext;
     }
   }
   return deflatedcount;
@@ -1485,19 +1485,19 @@
       }
       nScavenged += deflatedcount;
       nInuse += cur->omInUseCount;
-     }
+    }
 
-   // For moribund threads, scan gOmInUseList
-   if (gOmInUseList) {
-     nInCirculation += gOmInUseCount;
-     int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
-     gOmInUseCount-= deflatedcount;
-     nScavenged += deflatedcount;
-     nInuse += gOmInUseCount;
+    // For moribund threads, scan gOmInUseList
+    if (gOmInUseList) {
+      nInCirculation += gOmInUseCount;
+      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
+      gOmInUseCount-= deflatedcount;
+      nScavenged += deflatedcount;
+      nInuse += gOmInUseCount;
     }
 
   } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
-  // Iterate over all extant monitors - Scavenge all idle monitors.
+    // Iterate over all extant monitors - Scavenge all idle monitors.
     assert(block->object() == CHAINMARKER, "must be a block header");
     nInCirculation += _BLOCKSIZE;
     for (int i = 1; i < _BLOCKSIZE; i++) {
@@ -1529,8 +1529,8 @@
 
   if (ObjectMonitor::Knob_Verbose) {
     ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
-        nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
-        MonitorPopulation, MonitorFreeCount);
+             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
+             MonitorPopulation, MonitorFreeCount);
     ::fflush(stdout);
   }
 
@@ -1538,11 +1538,11 @@
 
   // Move the scavenged monitors back to the global free list.
   if (FreeHead != NULL) {
-     guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
-     assert(FreeTail->FreeNext == NULL, "invariant");
-     // constant-time list splice - prepend scavenged segment to gFreeList
-     FreeTail->FreeNext = gFreeList;
-     gFreeList = FreeHead;
+    guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
+    assert(FreeTail->FreeNext == NULL, "invariant");
+    // constant-time list splice - prepend scavenged segment to gFreeList
+    FreeTail->FreeNext = gFreeList;
+    gFreeList = FreeHead;
   }
   Thread::muxRelease(&ListLock);
 
@@ -1561,10 +1561,10 @@
 // Gives up on a particular monitor if an exception occurs, but continues
 // the overall iteration, swallowing the exception.
 class ReleaseJavaMonitorsClosure: public MonitorClosure {
-private:
+ private:
   TRAPS;
 
-public:
+ public:
   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
   void do_monitor(ObjectMonitor* mid) {
     if (mid->owner() == THREAD) {
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Sep 10 11:48:20 2014 -0600
@@ -148,7 +148,7 @@
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
-                                              AllocFailStrategy::RETURN_NULL);
+                                                         AllocFailStrategy::RETURN_NULL);
     void* aligned_addr     = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
     assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
            ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
@@ -365,7 +365,7 @@
 #ifdef ASSERT
 // Private method to check for dangling thread pointer
 void check_for_dangling_thread_pointer(Thread *thread) {
- assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
+  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
          "possibility of dangling Thread pointer");
 }
 #endif
@@ -517,8 +517,8 @@
         ResourceMark rm;
 
         tty->print_cr(
-            "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
-            jt->get_thread_name(), *bits);
+                      "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
+                      jt->get_thread_name(), *bits);
 
         guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
       }
@@ -654,7 +654,7 @@
 // Returns true if the thread is externally suspended and false otherwise.
 //
 bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
-       uint32_t *bits) {
+                                                 uint32_t *bits) {
   TraceSuspendDebugBits tsdb(this, true /* is_wait */,
                              false /* !called_by_wait */, bits);
 
@@ -759,8 +759,8 @@
   bool gotframe = false;
   // self suspension saves needed state.
   if (has_last_Java_frame() && _anchor.walkable()) {
-     *_fr = pd_last_frame();
-     gotframe = true;
+    *_fr = pd_last_frame();
+    gotframe = true;
   }
   return gotframe;
 }
@@ -790,7 +790,7 @@
     } else {
       guarantee(res == strong_roots_parity, "Or else what?");
       assert(SharedHeap::heap()->workers()->active_workers() > 0,
-         "Should only fail when parallel.");
+             "Should only fail when parallel.");
       return false;
     }
   }
@@ -882,38 +882,38 @@
 // invoke the vm-thread (i.e., and oop allocation). In that case, we also have to make sure that
 // no threads which allow_vm_block's are held
 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
-    // Check if current thread is allowed to block at a safepoint
-    if (!(_allow_safepoint_count == 0))
-      fatal("Possible safepoint reached by thread that does not allow it");
-    if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
-      fatal("LEAF method calling lock?");
-    }
+  // Check if current thread is allowed to block at a safepoint
+  if (!(_allow_safepoint_count == 0))
+    fatal("Possible safepoint reached by thread that does not allow it");
+  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+    fatal("LEAF method calling lock?");
+  }
 
 #ifdef ASSERT
-    if (potential_vm_operation && is_Java_thread()
-        && !Universe::is_bootstrapping()) {
-      // Make sure we do not hold any locks that the VM thread also uses.
-      // This could potentially lead to deadlocks
-      for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
-        // Threads_lock is special, since the safepoint synchronization will not start before this is
-        // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
-        // since it is used to transfer control between JavaThreads and the VMThread
-        // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
-        if ((cur->allow_vm_block() &&
-              cur != Threads_lock &&
-              cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
-              cur != VMOperationRequest_lock &&
-              cur != VMOperationQueue_lock) ||
-              cur->rank() == Mutex::special) {
-          fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
-        }
+  if (potential_vm_operation && is_Java_thread()
+      && !Universe::is_bootstrapping()) {
+    // Make sure we do not hold any locks that the VM thread also uses.
+    // This could potentially lead to deadlocks
+    for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+      // Threads_lock is special, since the safepoint synchronization will not start before this is
+      // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
+      // since it is used to transfer control between JavaThreads and the VMThread
+      // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
+      if ((cur->allow_vm_block() &&
+           cur != Threads_lock &&
+           cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
+           cur != VMOperationRequest_lock &&
+           cur != VMOperationQueue_lock) ||
+           cur->rank() == Mutex::special) {
+        fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
       }
     }
-
-    if (GCALotAtAllSafepoints) {
-      // We could enter a safepoint here and thus have a gc
-      InterfaceSupport::check_gc_alot();
-    }
+  }
+
+  if (GCALotAtAllSafepoints) {
+    // We could enter a safepoint here and thus have a gc
+    InterfaceSupport::check_gc_alot();
+  }
 #endif
 }
 #endif
@@ -947,7 +947,7 @@
 }
 
 bool Thread::set_as_starting_thread() {
- // NOTE: this must be called inside the main thread.
+  // NOTE: this must be called inside the main thread.
   return os::create_main_thread((JavaThread*)this);
 }
 
@@ -1004,12 +1004,12 @@
 
   JavaValue result(T_VOID);
   JavaCalls::call_special(&result, thread_oop,
-                                   klass,
-                                   vmSymbols::object_initializer_name(),
-                                   vmSymbols::threadgroup_string_void_signature(),
-                                   thread_group,
-                                   string,
-                                   CHECK_NULL);
+                          klass,
+                          vmSymbols::object_initializer_name(),
+                          vmSymbols::threadgroup_string_void_signature(),
+                          thread_group,
+                          string,
+                          CHECK_NULL);
   return thread_oop();
 }
 
@@ -1019,7 +1019,7 @@
 
   JavaValue result(T_VOID);
   JavaCalls::call_static(&result, klass, vmSymbols::initializeSystemClass_name(),
-                                         vmSymbols::void_method_signature(), CHECK);
+                         vmSymbols::void_method_signature(), CHECK);
 }
 
 char java_runtime_name[128] = "";
@@ -1028,7 +1028,7 @@
 // extract the JRE name from sun.misc.Version.java_runtime_name
 static const char* get_java_runtime_name(TRAPS) {
   Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
-                                      Handle(), Handle(), CHECK_AND_CLEAR_NULL);
+                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
   fieldDescriptor fd;
   bool found = k != NULL &&
                InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(),
@@ -1049,7 +1049,7 @@
 // extract the JRE version from sun.misc.Version.java_runtime_version
 static const char* get_java_runtime_version(TRAPS) {
   Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
-                                      Handle(), Handle(), CHECK_AND_CLEAR_NULL);
+                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
   fieldDescriptor fd;
   bool found = k != NULL &&
                InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(),
@@ -1075,8 +1075,8 @@
   if (klass.not_null()) {
     JavaValue result(T_VOID);
     JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
-                                           vmSymbols::void_method_signature(),
-                                           CHECK);
+                           vmSymbols::void_method_signature(),
+                           CHECK);
   }
 }
 
@@ -1146,7 +1146,7 @@
 
 
   if (daemon) {
-      java_lang_Thread::set_daemon(thread_oop());
+    java_lang_Thread::set_daemon(thread_oop());
   }
 
   if (HAS_PENDING_EXCEPTION) {
@@ -1157,12 +1157,12 @@
   Handle threadObj(this, this->threadObj());
 
   JavaCalls::call_special(&result,
-                         thread_group,
-                         group,
-                         vmSymbols::add_method_name(),
-                         vmSymbols::thread_void_signature(),
-                         threadObj,          // Arg 1
-                         THREAD);
+                          thread_group,
+                          group,
+                          vmSymbols::add_method_name(),
+                          vmSymbols::thread_void_signature(),
+                          threadObj,          // Arg 1
+                          THREAD);
 
 
 }
@@ -1246,25 +1246,25 @@
     jlong now = os::javaTimeNanos();
 
     if (remaining == 0) {
-        // if we didn't have any tasks we could have waited for a long time
-        // consider the time_slept zero and reset time_before_loop
-        time_slept = 0;
-        time_before_loop = now;
+      // if we didn't have any tasks we could have waited for a long time
+      // consider the time_slept zero and reset time_before_loop
+      time_slept = 0;
+      time_before_loop = now;
     } else {
-        // need to recalculate since we might have new tasks in _tasks
-        time_slept = (int) ((now - time_before_loop) / 1000000);
+      // need to recalculate since we might have new tasks in _tasks
+      time_slept = (int) ((now - time_before_loop) / 1000000);
     }
 
     // Change to task list or spurious wakeup of some kind
     if (timedout || _should_terminate) {
-        break;
+      break;
     }
 
     remaining = PeriodicTask::time_to_wait();
     if (remaining == 0) {
-        // Last task was just disenrolled so loop around and wait until
-        // another task gets enrolled
-        continue;
+      // Last task was just disenrolled so loop around and wait until
+      // another task gets enrolled
+      continue;
     }
 
     remaining -= time_slept;
@@ -1302,13 +1302,13 @@
 
       for (;;) {
         if (!ShowMessageBoxOnError
-         && (OnError == NULL || OnError[0] == '\0')
-         && Arguments::abort_hook() == NULL) {
-             os::sleep(this, 2 * 60 * 1000, false);
-             fdStream err(defaultStream::output_fd());
-             err.print_raw_cr("# [ timer expired, abort... ]");
-             // skip atexit/vm_exit/vm_abort hooks
-             os::die();
+            && (OnError == NULL || OnError[0] == '\0')
+            && Arguments::abort_hook() == NULL) {
+          os::sleep(this, 2 * 60 * 1000, false);
+          fdStream err(defaultStream::output_fd());
+          err.print_raw_cr("# [ timer expired, abort... ]");
+          // skip atexit/vm_exit/vm_abort hooks
+          os::die();
         }
 
         // Wake up 5 seconds later, the fatal handler may reset OnError or
@@ -1486,10 +1486,10 @@
 #endif // INCLUDE_ALL_GCS
 
 JavaThread::JavaThread(bool is_attaching_via_jni) :
-  Thread()
+                       Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+                       , _satb_mark_queue(&_satb_mark_queue_set),
+                       _dirty_card_queue(&_dirty_card_queue_set)
 #endif // INCLUDE_ALL_GCS
 {
   initialize();
@@ -1543,10 +1543,10 @@
 static void compiler_thread_entry(JavaThread* thread, TRAPS);
 
 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
-  Thread()
+                       Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+                       , _satb_mark_queue(&_satb_mark_queue_set),
+                       _dirty_card_queue(&_dirty_card_queue_set)
 #endif // INCLUDE_ALL_GCS
 {
   if (TraceThreadEvents) {
@@ -1575,7 +1575,7 @@
 
 JavaThread::~JavaThread() {
   if (TraceThreadEvents) {
-      tty->print_cr("terminate thread %p", this);
+    tty->print_cr("terminate thread %p", this);
   }
 
   // JSR166 -- return the parker to the free list
@@ -1649,8 +1649,8 @@
 
   EventThreadStart event;
   if (event.should_commit()) {
-     event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
-     event.commit();
+    event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+    event.commit();
   }
 
   // We call another function to do the rest so we are sure that the stack addresses used
@@ -1742,10 +1742,10 @@
       if (HAS_PENDING_EXCEPTION) {
         ResourceMark rm(this);
         jio_fprintf(defaultStream::error_stream(),
-              "\nException: %s thrown from the UncaughtExceptionHandler"
-              " in thread \"%s\"\n",
-              pending_exception()->klass()->external_name(),
-              get_thread_name());
+                    "\nException: %s thrown from the UncaughtExceptionHandler"
+                    " in thread \"%s\"\n",
+                    pending_exception()->klass()->external_name(),
+                    get_thread_name());
         CLEAR_PENDING_EXCEPTION;
       }
     }
@@ -1754,8 +1754,8 @@
     // from java_lang_Thread object
     EventThreadEnd event;
     if (event.should_commit()) {
-        event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
-        event.commit();
+      event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+      event.commit();
     }
 
     // Call after last event on thread
@@ -1771,10 +1771,10 @@
         JavaValue result(T_VOID);
         KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
         JavaCalls::call_virtual(&result,
-                              threadObj, thread_klass,
-                              vmSymbols::exit_method_name(),
-                              vmSymbols::void_method_signature(),
-                              THREAD);
+                                threadObj, thread_klass,
+                                vmSymbols::exit_method_name(),
+                                vmSymbols::void_method_signature(),
+                                THREAD);
         CLEAR_PENDING_EXCEPTION;
       }
     }
@@ -2062,22 +2062,22 @@
     condition = _no_async_condition;  // done
     switch (thread_state()) {
     case _thread_in_vm:
-      {
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
-      }
+    {
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+    }
     case _thread_in_native:
-      {
-        ThreadInVMfromNative tiv(this);
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
-      }
+    {
+      ThreadInVMfromNative tiv(this);
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+    }
     case _thread_in_Java:
-      {
-        ThreadInVMfromJava tiv(this);
-        JavaThread* THREAD = this;
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
-      }
+    {
+      ThreadInVMfromJava tiv(this);
+      JavaThread* THREAD = this;
+      THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
+    }
     default:
       ShouldNotReachHere();
     }
@@ -2170,8 +2170,8 @@
       set_pending_async_exception(java_throwable);
 
       if (TraceExceptions) {
-       ResourceMark rm;
-       tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
+        ResourceMark rm;
+        tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
@@ -2198,7 +2198,7 @@
 void JavaThread::java_suspend() {
   { MutexLocker mu(Threads_lock);
     if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
-       return;
+      return;
     }
   }
 
@@ -2241,18 +2241,18 @@
 
   // we are in the process of exiting so don't suspend
   if (is_exiting()) {
-     clear_external_suspend();
-     return ret;
+    clear_external_suspend();
+    return ret;
   }
 
   assert(_anchor.walkable() ||
-    (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
-    "must have walkable stack");
+         (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
+         "must have walkable stack");
 
   MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
 
   assert(!this->is_ext_suspended(),
-    "a thread trying to self-suspend should not already be suspended");
+         "a thread trying to self-suspend should not already be suspended");
 
   if (this->is_suspend_equivalent()) {
     // If we are self-suspending as a result of the lifting of a
@@ -2289,12 +2289,12 @@
 // hence doesn't need protection from concurrent access at this stage
 void JavaThread::verify_not_published() {
   if (!Threads_lock->owned_by_self()) {
-   MutexLockerEx ml(Threads_lock,  Mutex::_no_safepoint_check_flag);
-   assert(!Threads::includes(this),
+    MutexLockerEx ml(Threads_lock,  Mutex::_no_safepoint_check_flag);
+    assert(!Threads::includes(this),
            "java thread shouldn't have been published yet!");
   }
   else {
-   assert(!Threads::includes(this),
+    assert(!Threads::includes(this),
            "java thread shouldn't have been published yet!");
   }
 }
@@ -2474,7 +2474,7 @@
     if (os::unguard_memory((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
-        warning("Attempt to unprotect stack guard pages failed.");
+      warning("Attempt to unprotect stack guard pages failed.");
     }
   }
 }
@@ -2640,7 +2640,7 @@
 // the given JavaThread in its _processed_thread field.
 class RememberProcessedThread: public StackObj {
   NamedThread* _cur_thr;
-public:
+ public:
   RememberProcessedThread(JavaThread* jthr) {
     Thread* thread = Thread::current();
     if (thread->is_Named_thread()) {
@@ -2669,7 +2669,7 @@
   Thread::oops_do(f, cld_f, cf);
 
   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
-          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
     // Record JavaThread to GC thread
@@ -2729,7 +2729,7 @@
   Thread::nmethods_do(cf);  // (super method is a no-op)
 
   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
-          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
     // Traverse the execution stack
@@ -2809,7 +2809,7 @@
   st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
   oop thread_obj = threadObj();
   if (thread_obj != NULL) {
-     if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
+    if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
   }
   st->print(" [");
   st->print("%s", _get_thread_state_name(_thread_state));
@@ -2853,7 +2853,7 @@
     }
   }
 #endif // ASSERT
-    return get_thread_name_string();
+  return get_thread_name_string();
 }
 
 // Returns a non-NULL representation of this thread's name, or a suitable
@@ -2950,7 +2950,7 @@
   Handle thread_oop(Thread::current(),
                     JNIHandles::resolve_non_null(jni_thread));
   assert(InstanceKlass::cast(thread_oop->klass())->is_linked(),
-    "must be initialized");
+         "must be initialized");
   set_threadObj(thread_oop());
   java_lang_Thread::set_thread(thread_oop(), this);
 
@@ -3383,7 +3383,7 @@
 
   if (!main_thread->set_as_starting_thread()) {
     vm_shutdown_during_initialization(
-      "Failed necessary internal allocation. Out of swap space");
+                                      "Failed necessary internal allocation. Out of swap space");
     delete main_thread;
     *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
     return JNI_ENOMEM;
@@ -3583,17 +3583,17 @@
   }
 
   {
-      MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
-      // Make sure the watcher thread can be started by WatcherThread::start()
-      // or by dynamic enrollment.
-      WatcherThread::make_startable();
-      // Start up the WatcherThread if there are any periodic tasks
-      // NOTE:  All PeriodicTasks should be registered by now. If they
-      //   aren't, late joiners might appear to start slowly (we might
-      //   take a while to process their first tick).
-      if (PeriodicTask::num_tasks() > 0) {
-          WatcherThread::start();
-      }
+    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+    // Make sure the watcher thread can be started by WatcherThread::start()
+    // or by dynamic enrollment.
+    WatcherThread::make_startable();
+    // Start up the WatcherThread if there are any periodic tasks
+    // NOTE:  All PeriodicTasks should be registered by now. If they
+    //   aren't, late joiners might appear to start slowly (we might
+    //   take a while to process their first tick).
+    if (PeriodicTask::num_tasks() > 0) {
+      WatcherThread::start();
+    }
   }
 
   // Give os specific code one last chance to start
@@ -3749,10 +3749,10 @@
 
     // Find the Agent_OnUnload function.
     Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
-      os::find_agent_function(agent,
-      false,
-      on_unload_symbols,
-      num_symbol_entries));
+                                                   os::find_agent_function(agent,
+                                                   false,
+                                                   on_unload_symbols,
+                                                   num_symbol_entries));
 
     // Invoke the Agent_OnUnload function
     if (unload_entry != NULL) {
@@ -4060,7 +4060,7 @@
   bool is_par = sh->n_par_threads() > 0;
   assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
-          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+         SharedHeap::heap()->workers()->active_workers()), "Mismatch");
   int cp = SharedHeap::heap()->strong_roots_parity();
   ALL_JAVA_THREADS(p) {
     if (p->claim_oops_do(is_par, cp)) {
@@ -4113,9 +4113,9 @@
 
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
-  address monitor, bool doLock) {
+                                                         address monitor, bool doLock) {
   assert(doLock || SafepointSynchronize::is_at_safepoint(),
-    "must grab Threads_lock or be at safepoint");
+         "must grab Threads_lock or be at safepoint");
   GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
 
   int i = 0;
@@ -4181,10 +4181,10 @@
   st->print_cr("%s", os::local_time_string(buf, sizeof(buf)));
 
   st->print_cr("Full thread dump %s (%s %s):",
-                Abstract_VM_Version::vm_name(),
-                Abstract_VM_Version::vm_release(),
-                Abstract_VM_Version::vm_info_string()
-               );
+               Abstract_VM_Version::vm_name(),
+               Abstract_VM_Version::vm_release(),
+               Abstract_VM_Version::vm_info_string()
+              );
   st->cr();
 
 #if INCLUDE_ALL_GCS
@@ -4303,7 +4303,7 @@
 
 void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
   if (Atomic::cmpxchg (1, adr, 0) == 0) {
-     return;   // normal fast-path return
+    return;   // normal fast-path return
   }
 
   // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
@@ -4311,20 +4311,20 @@
   int ctr = 0;
   int Yields = 0;
   for (;;) {
-     while (*adr != 0) {
-        ++ctr;
-        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
-           if (Yields > 5) {
-             os::naked_short_sleep(1);
-           } else {
-             os::naked_yield();
-             ++Yields;
-           }
+    while (*adr != 0) {
+      ++ctr;
+      if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
+        if (Yields > 5) {
+          os::naked_short_sleep(1);
         } else {
-           SpinPause();
+          os::naked_yield();
+          ++Yields;
         }
-     }
-     if (Atomic::cmpxchg(1, adr, 0) == 0) return;
+      } else {
+        SpinPause();
+      }
+    }
+    if (Atomic::cmpxchg(1, adr, 0) == 0) return;
   }
 }
 
@@ -4401,45 +4401,45 @@
   intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
   if (w == 0) return;
   if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-     return;
+    return;
   }
 
   TEVENT(muxAcquire - Contention);
   ParkEvent * const Self = Thread::current()->_MuxEvent;
   assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
   for (;;) {
-     int its = (os::is_MP() ? 100 : 0) + 1;
-
-     // Optional spin phase: spin-then-park strategy
-     while (--its >= 0) {
-       w = *Lock;
-       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+    int its = (os::is_MP() ? 100 : 0) + 1;
+
+    // Optional spin phase: spin-then-park strategy
+    while (--its >= 0) {
+      w = *Lock;
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+        return;
+      }
+    }
+
+    Self->reset();
+    Self->OnList = intptr_t(Lock);
+    // The following fence() isn't _strictly necessary as the subsequent
+    // CAS() both serializes execution and ratifies the fetched *Lock value.
+    OrderAccess::fence();
+    for (;;) {
+      w = *Lock;
+      if ((w & LOCKBIT) == 0) {
+        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+          Self->OnList = 0;   // hygiene - allows stronger asserts
           return;
-       }
-     }
-
-     Self->reset();
-     Self->OnList = intptr_t(Lock);
-     // The following fence() isn't _strictly necessary as the subsequent
-     // CAS() both serializes execution and ratifies the fetched *Lock value.
-     OrderAccess::fence();
-     for (;;) {
-        w = *Lock;
-        if ((w & LOCKBIT) == 0) {
-            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-                Self->OnList = 0;   // hygiene - allows stronger asserts
-                return;
-            }
-            continue;      // Interference -- *Lock changed -- Just retry
         }
-        assert(w & LOCKBIT, "invariant");
-        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-        if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
-     }
-
-     while (Self->OnList != 0) {
-        Self->park();
-     }
+        continue;      // Interference -- *Lock changed -- Just retry
+      }
+      assert(w & LOCKBIT, "invariant");
+      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
+      if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
+    }
+
+    while (Self->OnList != 0) {
+      Self->park();
+    }
   }
 }
 
--- a/hotspot/src/share/vm/runtime/thread.hpp	Fri Aug 29 08:14:19 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Sep 10 11:48:20 2014 -0600
@@ -115,7 +115,7 @@
   void  operator delete(void* p);
 
  protected:
-   static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
+  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
  private:
 
   // ***************************************************************
@@ -225,10 +225,10 @@
   // claimed as a task.
   jint _oops_do_parity;
 
-  public:
-   void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
-   HandleMark* last_handle_mark() const          { return _last_handle_mark; }
-  private:
+ public:
+  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
+  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
+ private:
 
   // debug support for checking if code does allow safepoints or not
   // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
@@ -445,9 +445,9 @@
   virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
-private:
+ private:
   bool claim_oops_do_par_case(int collection_parity);
-public:
+ public:
   // Requires that "collection_parity" is that of the current roots
   // iteration.  If "is_par" is false, sets the parity of "this" to
   // "collection_parity", and returns "true".  If "is_par" is true,
@@ -664,9 +664,9 @@
 
 // Worker threads are named and have an id of an assigned work.
 class WorkerThread: public NamedThread {
-private:
+ private:
   uint _id;
-public:
+ public:
   WorkerThread() : _id(0)               { }
   virtual bool is_Worker_thread() const { return true; }
 
@@ -844,7 +844,7 @@
                                                  // handlers thread is in
   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
   bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
-                                                 // never locked) when throwing an exception. Used by interpreter only.
+  // never locked) when throwing an exception. Used by interpreter only.
 
   // JNI attach states:
   enum JNIAttachStates {
@@ -898,11 +898,11 @@
 #ifndef PRODUCT
   int _jmp_ring_index;
   struct {
-      // We use intptr_t instead of address so debugger doesn't try and display strings
-      intptr_t _target;
-      intptr_t _instruction;
-      const char*  _file;
-      int _line;
+    // We use intptr_t instead of address so debugger doesn't try and display strings
+    intptr_t _target;
+    intptr_t _instruction;
+    const char*  _file;
+    int _line;
   }   _jmp_ring[jump_ring_buffer_size];
 #endif /* PRODUCT */
 
@@ -1113,7 +1113,7 @@
   // when a suspend equivalent condition lifts.
   bool handle_special_suspend_equivalent_condition() {
     assert(is_suspend_equivalent(),
-      "should only be called in a suspend equivalence condition");
+           "should only be called in a suspend equivalence condition");
     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
     bool ret = is_external_suspend();
     if (!ret) {
@@ -1339,10 +1339,10 @@
     // Only return NULL if thread is off the thread list; starting to
     // exit should not return NULL.
     if (thread_from_jni_env->is_terminated()) {
-       thread_from_jni_env->block_if_vm_exited();
-       return NULL;
+      thread_from_jni_env->block_if_vm_exited();
+      return NULL;
     } else {
-       return thread_from_jni_env;
+      return thread_from_jni_env;
     }
   }
 
@@ -1352,12 +1352,12 @@
   void enter_critical() { assert(Thread::current() == this ||
                                  Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
                                  "this must be current thread or synchronizing");
-                          _jni_active_critical++; }
+  _jni_active_critical++; }
   void exit_critical()  { assert(Thread::current() == this,
                                  "this must be current thread");
-                          _jni_active_critical--;
-                          assert(_jni_active_critical >= 0,
-                                 "JNI critical nesting problem?"); }
+  _jni_active_critical--;
+  assert(_jni_active_critical >= 0,
+         "JNI critical nesting problem?"); }
 
   // Checked JNI, is the programmer required to check for exceptions, specify which function name
   bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
@@ -1411,10 +1411,10 @@
   void print_on_error(outputStream* st, char* buf, int buflen) const;
   void verify();
   const char* get_thread_name() const;
-private:
+ private:
   // factor out low-level mechanics for use in both normal and error cases
   const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
-public:
+ public:
   const char* get_threadgroup_name() const;
   const char* get_parent_name() const;
 
@@ -1456,20 +1456,20 @@
 
   // Profiling operation (see fprofile.cpp)
  public:
-   bool profile_last_Java_frame(frame* fr);
+  bool profile_last_Java_frame(frame* fr);
 
  private:
-   ThreadProfiler* _thread_profiler;
+  ThreadProfiler* _thread_profiler;
  private:
-   friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
-   friend class FlatProfilerTask;                // uses get_thread_profiler.
-   friend class ThreadProfilerMark;              // uses get_thread_profiler.
-   ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
-   ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
-     ThreadProfiler* result = _thread_profiler;
-     _thread_profiler = tp;
-     return result;
-   }
+  friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
+  friend class FlatProfilerTask;                // uses get_thread_profiler.
+  friend class ThreadProfilerMark;              // uses get_thread_profiler.
+  ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
+  ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
+    ThreadProfiler* result = _thread_profiler;
+    _thread_profiler = tp;
+    return result;
+  }
 
  public:
   // Returns the running thread as a JavaThread
@@ -1692,15 +1692,15 @@
 
 
   // JSR166 per-thread parker
-private:
+ private:
   Parker*    _parker;
-public:
+ public:
   Parker*     parker() { return _parker; }
 
   // Biased locking support
-private:
+ private:
   GrowableArray<MonitorInfo*>* _cached_monitor_info;
-public:
+ public:
   GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
   void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
 
@@ -1708,12 +1708,12 @@
   bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
   bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
   inline void set_done_attaching_via_jni();
-private:
+ private:
   // This field is used to determine if a thread has claimed
   // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
   // otherwise its value is the par_id that has been claimed.
   uint _claimed_par_id;
-public:
+ public:
   uint get_claimed_par_id() { return _claimed_par_id; }
   void set_claimed_par_id(uint id) { _claimed_par_id = id; }
 };
@@ -1782,9 +1782,9 @@
   void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
 #ifndef PRODUCT
-private:
+ private:
   IdealGraphPrinter *_ideal_graph_printer;
-public:
+ public:
   IdealGraphPrinter *ideal_graph_printer()                       { return _ideal_graph_printer; }
   void set_ideal_graph_printer(IdealGraphPrinter *n)             { _ideal_graph_printer = n; }
 #endif
@@ -1885,13 +1885,13 @@
   // is true, then Threads_lock is grabbed as needed. Otherwise, the
   // VM needs to be at a safepoint.
   static GrowableArray<JavaThread*>* get_pending_threads(int count,
-    address monitor, bool doLock);
+                                                         address monitor, bool doLock);
 
   // Get owning Java thread from the monitor's owner field. If doLock
   // is true, then Threads_lock is grabbed as needed. Otherwise, the
   // VM needs to be at a safepoint.
   static JavaThread *owning_thread_from_monitor_owner(address owner,
-    bool doLock);
+                                                      bool doLock);
 
   // Number of threads on the active threads list
   static int number_of_threads()                 { return _number_of_threads; }
@@ -1911,9 +1911,9 @@
 };
 
 class SignalHandlerMark: public StackObj {
-private:
+ private:
   Thread* _thread;
-public:
+ public:
   SignalHandlerMark(Thread* t) {
     _thread = t;
     if (_thread) _thread->enter_signal_handler();