src/hotspot/share/runtime/thread.cpp
branch:    datagramsocketimpl-branch
changeset: 58678:9cf78a70fa4f
parent:    55005:9b70ebd131b4
child:     58679:9c3209ff7550
--- a/src/hotspot/share/runtime/thread.cpp	Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp	Thu Oct 17 20:53:35 2019 +0100
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "jvm.h"
+#include "aot/aotLoader.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/moduleEntry.hpp"
@@ -165,14 +166,14 @@
 
 #ifndef USE_LIBRARY_BASED_TLS_ONLY
 // Current thread is maintained as a thread-local variable
-THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL;
+THREAD_LOCAL Thread* Thread::_thr_current = NULL;
 #endif
 
 // ======= Thread ========
 // Support for forcing alignment of thread objects for biased locking
 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   if (UseBiasedLocking) {
-    const int alignment = markOopDesc::biased_lock_alignment;
+    const size_t alignment = markWord::biased_lock_alignment;
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
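
The hunk above switches to markWord::biased_lock_alignment but keeps the long-standing over-allocate-then-align scheme for thread objects. A minimal standalone sketch of that scheme, assuming plain malloc() in place of AllocateHeap() and a hypothetical ALIGNMENT constant in place of markWord::biased_lock_alignment:

#include <cassert>
#include <cstdint>
#include <cstdlib>

static const size_t ALIGNMENT = 256;  // power of two, like biased_lock_alignment

void* allocate_aligned(size_t size, void** real_malloc_addr_out) {
  // malloc() already returns storage aligned to at least sizeof(intptr_t),
  // so (ALIGNMENT - sizeof(intptr_t)) spare bytes are enough to find an
  // ALIGNMENT-aligned address inside the block.
  size_t aligned_size = size + (ALIGNMENT - sizeof(intptr_t));
  void* raw = ::malloc(aligned_size);
  if (raw == NULL) return NULL;
  *real_malloc_addr_out = raw;  // the caller must free() this, not the result
  uintptr_t aligned = ((uintptr_t)raw + ALIGNMENT - 1) & ~(uintptr_t)(ALIGNMENT - 1);
  assert(aligned + size <= (uintptr_t)raw + aligned_size);
  return (void*)aligned;
}

This is why the constructor assert below compares this against align_up(_real_malloc_address, markWord::biased_lock_alignment): deallocation must use the recorded raw address, not the aligned Thread*.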
@@ -222,7 +223,6 @@
   // stack and get_thread
   set_stack_base(NULL);
   set_stack_size(0);
-  set_self_raw_id(0);
   set_lgrp_id(-1);
   DEBUG_ONLY(clear_suspendible_thread();)
 
@@ -249,8 +249,7 @@
 
   // plain initialization
   debug_only(_owned_locks = NULL;)
-  debug_only(_allow_allocation_count = 0;)
-  NOT_PRODUCT(_allow_safepoint_count = 0;)
+  NOT_PRODUCT(_no_safepoint_count = 0;)
   NOT_PRODUCT(_skip_gcalot = false;)
   _jvmti_env_iteration_count = 0;
   set_allocated_bytes(0);
@@ -259,12 +258,13 @@
   _current_pending_monitor = NULL;
   _current_pending_monitor_is_from_java = true;
   _current_waiting_monitor = NULL;
+  _current_pending_raw_monitor = NULL;
   _num_nested_signal = 0;
-  omFreeList = NULL;
-  omFreeCount = 0;
-  omFreeProvision = 32;
-  omInUseList = NULL;
-  omInUseCount = 0;
+  om_free_list = NULL;
+  om_free_count = 0;
+  om_free_provision = 32;
+  om_in_use_list = NULL;
+  om_in_use_count = 0;
 
 #ifdef ASSERT
   _visited_for_critical_count = false;
@@ -292,7 +292,6 @@
   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
   // and ::Release()
   _ParkEvent   = ParkEvent::Allocate(this);
-  _SleepEvent  = ParkEvent::Allocate(this);
   _MuxEvent    = ParkEvent::Allocate(this);
 
 #ifdef CHECK_UNHANDLED_OOPS
@@ -302,9 +301,9 @@
 #endif // CHECK_UNHANDLED_OOPS
 #ifdef ASSERT
   if (UseBiasedLocking) {
-    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+    assert(is_aligned(this, markWord::biased_lock_alignment), "forced alignment of thread object failed");
     assert(this == _real_malloc_address ||
-           this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+           this == align_up(_real_malloc_address, markWord::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
 #endif // ASSERT
@@ -457,7 +456,6 @@
   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
   // We NULL out the fields for good hygiene.
   ParkEvent::Release(_ParkEvent); _ParkEvent   = NULL;
-  ParkEvent::Release(_SleepEvent); _SleepEvent  = NULL;
   ParkEvent::Release(_MuxEvent); _MuxEvent    = NULL;
 
   delete handle_area();
@@ -859,36 +857,6 @@
   return true;
 }
 
-#ifndef PRODUCT
-void JavaThread::record_jump(address target, address instr, const char* file,
-                             int line) {
-
-  // This should not need to be atomic as the only way for simultaneous
-  // updates is via interrupts. Even then this should be rare or non-existent
-  // and we don't care that much anyway.
-
-  int index = _jmp_ring_index;
-  _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1);
-  _jmp_ring[index]._target = (intptr_t) target;
-  _jmp_ring[index]._instruction = (intptr_t) instr;
-  _jmp_ring[index]._file = file;
-  _jmp_ring[index]._line = line;
-}
-#endif // PRODUCT
-
-void Thread::interrupt(Thread* thread) {
-  debug_only(check_for_dangling_thread_pointer(thread);)
-  os::interrupt(thread);
-}
-
-bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
-  debug_only(check_for_dangling_thread_pointer(thread);)
-  // Note:  If clear_interrupted==false, this simply fetches and
-  // returns the value of the field osthread()->interrupted().
-  return os::is_interrupted(thread, clear_interrupted);
-}
-
-
 // GC Support
 bool Thread::claim_par_threads_do(uintx claim_token) {
   uintx token = _threads_do_token;
@@ -989,7 +957,7 @@
 
 #ifdef ASSERT
 void Thread::print_owned_locks_on(outputStream* st) const {
-  Monitor *cur = _owned_locks;
+  Mutex* cur = _owned_locks;
   if (cur == NULL) {
     st->print(" (no locks) ");
   } else {
@@ -1001,60 +969,38 @@
   }
 }
 
-static int ref_use_count  = 0;
-
-bool Thread::owns_locks_but_compiled_lock() const {
-  for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
-    if (cur != Compile_lock) return true;
-  }
-  return false;
-}
-
-
-#endif
-
-#ifndef PRODUCT
-
-// The flag: potential_vm_operation notifies if this particular safepoint state could potentially
-// invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
-// no locks which allow_vm_block's are held
-void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
-  // Check if current thread is allowed to block at a safepoint
-  if (!(_allow_safepoint_count == 0)) {
+// Checks that a safepoint is allowed and clears unhandled oops at potential safepoints.
+void Thread::check_possible_safepoint() {
+  if (!is_Java_thread()) return;
+
+  if (_no_safepoint_count > 0) {
+    print_owned_locks();
     fatal("Possible safepoint reached by thread that does not allow it");
   }
-  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+#ifdef CHECK_UNHANDLED_OOPS
+  // Clear unhandled oops in JavaThreads so we get a crash right away.
+  clear_unhandled_oops();
+#endif // CHECK_UNHANDLED_OOPS
+}
+
+void Thread::check_for_valid_safepoint_state() {
+  if (!is_Java_thread()) return;
+
+  // Check NoSafepointVerifier, which is implied by locks taken that can be
+  // shared with the VM thread.  This makes sure that no locks that set
+  // allow_vm_block are held.
+  check_possible_safepoint();
+
+  if (((JavaThread*)this)->thread_state() != _thread_in_vm) {
     fatal("LEAF method calling lock?");
   }
 
-#ifdef ASSERT
-  if (potential_vm_operation && is_Java_thread()
-      && !Universe::is_bootstrapping()) {
-    // Make sure we do not hold any locks that the VM thread also uses.
-    // This could potentially lead to deadlocks
-    for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
-      // Threads_lock is special, since the safepoint synchronization will not start before this is
-      // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
-      // since it is used to transfer control between JavaThreads and the VMThread
-      // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
-      if ((cur->allow_vm_block() &&
-           cur != Threads_lock &&
-           cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
-           cur != VMOperationRequest_lock &&
-           cur != VMOperationQueue_lock) ||
-           cur->rank() == Mutex::special) {
-        fatal("Thread holding lock at safepoint that vm can block on: %s", cur->name());
-      }
-    }
-  }
-
   if (GCALotAtAllSafepoints) {
     // We could enter a safepoint here and thus have a gc
     InterfaceSupport::check_gc_alot();
   }
-#endif
 }
-#endif
+#endif // ASSERT
 
 bool Thread::is_in_stack(address adr) const {
   assert(Thread::current() == this, "is_in_stack can only be called from current thread");
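
The _no_safepoint_count consulted by check_possible_safepoint() above is maintained by scoped verifier objects (NoSafepointVerifier in HotSpot, whose simplified default constructor also appears in Threads::non_java_threads_do() later in this change). A minimal sketch of the RAII pattern, with illustrative names rather than the real classes:

#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins for Thread and NoSafepointVerifier.
struct ThreadLike {
  int _no_safepoint_count;
  ThreadLike() : _no_safepoint_count(0) {}

  // Called at any point that might reach a safepoint.
  void check_possible_safepoint() {
    if (_no_safepoint_count > 0) {
      fprintf(stderr, "Possible safepoint reached by thread that does not allow it\n");
      abort();  // HotSpot calls fatal(...)
    }
  }
};

// While one of these is live on a thread's stack, that thread must not
// reach a safepoint; nesting works because the field is a counter.
class NoSafepointScope {
  ThreadLike* _t;
 public:
  explicit NoSafepointScope(ThreadLike* t) : _t(t) { _t->_no_safepoint_count++; }
  ~NoSafepointScope() { _t->_no_safepoint_count--; }
};

The counter needs no synchronization because it is only read and written by its owning thread.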
@@ -1350,16 +1296,12 @@
 {}
 
 NamedThread::~NamedThread() {
-  if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
-    _name = NULL;
-  }
+  FREE_C_HEAP_ARRAY(char, _name);
 }
 
 void NamedThread::set_name(const char* format, ...) {
   guarantee(_name == NULL, "Only get to set name once.");
   _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
-  guarantee(_name != NULL, "alloc failure");
   va_list ap;
   va_start(ap, format);
   jio_vsnprintf(_name, max_name_len, format, ap);
@@ -1591,6 +1533,55 @@
   }
 }
 
+// Resize the array of per-thread counters, preserving old values.
+jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) {
+  jlong* new_counters = NEW_C_HEAP_ARRAY(jlong, new_size, mtJVMCI);
+  if (old_counters == NULL) {
+    old_counters = new_counters;
+    memset(old_counters, 0, sizeof(jlong) * new_size);
+  } else {
+    for (int i = 0; i < MIN2((int) current_size, new_size); i++) {
+      new_counters[i] = old_counters[i];
+    }
+    if (new_size > current_size) {
+      memset(new_counters + current_size, 0, sizeof(jlong) * (new_size - current_size));
+    }
+    FREE_C_HEAP_ARRAY(jlong, old_counters);
+  }
+  return new_counters;
+}
+
+// Resize this JavaThread's array of per-thread counters.
+void JavaThread::resize_counters(int current_size, int new_size) {
+  _jvmci_counters = resize_counters_array(_jvmci_counters, current_size, new_size);
+}
+
+class VM_JVMCIResizeCounters : public VM_Operation {
+ private:
+  int _new_size;
+
+ public:
+  VM_JVMCIResizeCounters(int new_size) : _new_size(new_size) { }
+  VMOp_Type type()                  const        { return VMOp_JVMCIResizeCounters; }
+  bool allow_nested_vm_operations() const        { return true; }
+  void doit() {
+    // Resize the old thread counters array
+    jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size);
+    JavaThread::_jvmci_old_thread_counters = new_counters;
+
+    // Now resize each thread's counters array
+    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *tp = jtiwh.next(); ) {
+      tp->resize_counters(JVMCICounterSize, _new_size);
+    }
+    JVMCICounterSize = _new_size;
+  }
+};
+
+void JavaThread::resize_all_jvmci_counters(int new_size) {
+  VM_JVMCIResizeCounters op(new_size);
+  VMThread::execute(&op);
+}
+
 #endif // INCLUDE_JVMCI
 
 // A JavaThread is a normal Java thread
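
The contract of resize_counters_array() above: a fresh array is zero-filled; on resize the surviving prefix is copied and any new tail is zeroed. A standalone check of that contract, assuming plain malloc()/free() in place of NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY (the real macros allocate NMT-tracked memory tagged mtJVMCI):

#include <cassert>
#include <cstdlib>
#include <cstring>

typedef long long jlong;

// Same shape as the changeset's resize_counters_array().
static jlong* resize(jlong* old_counters, int current_size, int new_size) {
  jlong* new_counters = (jlong*)::malloc(sizeof(jlong) * new_size);
  if (old_counters == NULL) {
    memset(new_counters, 0, sizeof(jlong) * new_size);        // fresh: all zero
  } else {
    int keep = current_size < new_size ? current_size : new_size;
    memcpy(new_counters, old_counters, sizeof(jlong) * keep); // copy prefix
    if (new_size > current_size) {
      memset(new_counters + current_size, 0,
             sizeof(jlong) * (new_size - current_size));      // zero new tail
    }
    ::free(old_counters);
  }
  return new_counters;
}

int main() {
  jlong* c = resize(NULL, 0, 4);  // fresh, zeroed
  c[3] = 42;
  c = resize(c, 4, 8);            // grow: prefix kept, tail zeroed
  assert(c[3] == 42 && c[7] == 0);
  c = resize(c, 8, 2);            // shrink: prefix kept
  assert(c[0] == 0);
  ::free(c);
  return 0;
}

Running the per-thread resizes inside a VM_Operation, as VM_JVMCIResizeCounters does, keeps the thread list and JVMCICounterSize stable while the arrays are swapped.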
@@ -1611,7 +1602,6 @@
   set_deferred_locals(NULL);
   set_deopt_mark(NULL);
   set_deopt_compiled_method(NULL);
-  clear_must_deopt_id();
   set_monitor_chunks(NULL);
   _on_thread_list = false;
   set_thread_state(_thread_new);
@@ -1629,11 +1619,9 @@
   _in_retryable_allocation = false;
   _jvmci._alternate_call_target = NULL;
   assert(_jvmci._implicit_exception_pc == NULL, "must be");
+  _jvmci_counters = NULL;
   if (JVMCICounterSize > 0) {
-    _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
-    memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
-  } else {
-    _jvmci_counters = NULL;
+    resize_counters(0, (int) JVMCICounterSize);
   }
 #endif // INCLUDE_JVMCI
   _reserved_stack_activation = NULL;  // stack base not known yet
@@ -1648,20 +1636,12 @@
   _pending_async_exception = NULL;
   _thread_stat = NULL;
   _thread_stat = new ThreadStatistics();
-  _blocked_on_compilation = false;
   _jni_active_critical = 0;
   _pending_jni_exception_check_fn = NULL;
   _do_not_unlock_if_synchronized = false;
   _cached_monitor_info = NULL;
   _parker = Parker::Allocate(this);
-
-#ifndef PRODUCT
-  _jmp_ring_index = 0;
-  for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
-    record_jump(NULL, NULL, NULL, 0);
-  }
-#endif // PRODUCT
-
+  _SleepEvent = ParkEvent::Allocate(this);
   // Setup safepoint state info for this thread
   ThreadSafepointState::create(this);
 
@@ -1693,6 +1673,56 @@
   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
 }
 
+
+// interrupt support
+
+void JavaThread::interrupt() {
+  debug_only(check_for_dangling_thread_pointer(this);)
+
+  if (!osthread()->interrupted()) {
+    osthread()->set_interrupted(true);
+    // More than one thread can get here with the same value of osthread,
+    // resulting in multiple notifications.  We do, however, want the store
+    // to interrupted() to be visible to other threads before we execute unpark().
+    OrderAccess::fence();
+
+    // For JavaThread::sleep. Historically we only unpark if changing to the interrupted
+    // state, in contrast to the other events below. Not clear exactly why.
+    _SleepEvent->unpark();
+  }
+
+  // For JSR166. Unpark even if interrupt status already was set.
+  parker()->unpark();
+
+  // For ObjectMonitor and JvmtiRawMonitor
+  _ParkEvent->unpark();
+}
+
+
+bool JavaThread::is_interrupted(bool clear_interrupted) {
+  debug_only(check_for_dangling_thread_pointer(this);)
+  bool interrupted = osthread()->interrupted();
+
+  // NOTE that since there is no "lock" around the interrupt and
+  // is_interrupted operations, there is the possibility that the
+  // interrupted flag (in osThread) will be "false" but that the
+  // low-level events will be in the signaled state. This is
+  // intentional. The effect of this is that Object.wait() and
+  // LockSupport.park() will appear to have a spurious wakeup, which
+  // is allowed and not harmful, and the possibility is so rare that
+  // it is not worth the added complexity to add yet another lock.
+  // For the sleep event an explicit reset is performed on entry
+  // to JavaThread::sleep, so there is no early return. It has also been
+  // recommended not to put the interrupted flag into the "event"
+  // structure because it hides the issue.
+  if (interrupted && clear_interrupted) {
+    osthread()->set_interrupted(false);
+    // consider thread->_SleepEvent->reset() ... optional optimization
+  }
+
+  return interrupted;
+}
+
 bool JavaThread::reguard_stack(address cur_sp) {
   if (_stack_guard_state != stack_guard_yellow_reserved_disabled
       && _stack_guard_state != stack_guard_reserved_disabled) {
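
The comment in JavaThread::interrupt() above requires the store to the interrupt flag to become visible before unpark() wakes a sleeper. A portable sketch of that store-then-fence-then-wake ordering, using a toy mutex/condvar pair in place of HotSpot's ParkEvent and a seq_cst atomic store in place of OrderAccess::fence() (ToySleeper and its members are illustrative, not HotSpot code):

#include <atomic>
#include <condition_variable>
#include <mutex>

struct ToySleeper {
  std::atomic<bool> interrupted{false};
  std::mutex m;
  std::condition_variable cv;
  bool signaled = false;

  void interrupt() {
    // Publish the flag before the wakeup, so a sleeper woken by notify_one()
    // cannot still observe a stale 'false' flag.
    interrupted.store(true, std::memory_order_seq_cst);
    std::lock_guard<std::mutex> lg(m);
    signaled = true;
    cv.notify_one();  // plays the role of _SleepEvent->unpark()
  }

  // Returns false if interrupted, mirroring the JavaThread::sleep() contract.
  bool sleep_until_signal() {
    std::unique_lock<std::mutex> ul(m);
    cv.wait(ul, [this] { return signaled; });  // plays the role of park()
    return !interrupted.load(std::memory_order_seq_cst);
  }
};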
@@ -1733,8 +1763,11 @@
 void JavaThread::block_if_vm_exited() {
   if (_terminated == _vm_exited) {
     // _vm_exited is set at safepoint, and Threads_lock is never released
-    // we will block here forever
-    Threads_lock->lock_without_safepoint_check();
+    // we will block here forever.
+    // Here we can be doing a jump from a safe state to an unsafe state without
+    // proper transition, but it happens after the final safepoint has begun.
+    set_thread_state(_thread_in_vm);
+    Threads_lock->lock();
     ShouldNotReachHere();
   }
 }
@@ -1773,6 +1806,10 @@
   Parker::Release(_parker);
   _parker = NULL;
 
+  // Return the sleep event to the free list
+  ParkEvent::Release(_SleepEvent);
+  _SleepEvent = NULL;
+
   // Free any remaining  previous UnrollBlock
   vframeArray* old_array = vframe_array_last();
 
@@ -2274,14 +2311,9 @@
 }
 
 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
-  //
+
   // Check for pending external suspend.
-  // If JNIEnv proxies are allowed, don't self-suspend if the target
-  // thread is not the current thread. In older versions of jdbx, jdbx
-  // threads could call into the VM with another thread's JNIEnv so we
-  // can be here operating on behalf of a suspended thread (4432884).
-  bool do_self_suspend = is_external_suspend_with_lock();
-  if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
+  if (is_external_suspend_with_lock()) {
     frame_anchor()->make_walkable(this);
     java_suspend_self_with_safepoint_check();
   }
@@ -2338,8 +2370,8 @@
   }
 
 
-  // Interrupt thread so it will wake up from a potential wait()
-  Thread::interrupt(this);
+  // Interrupt thread so it will wake up from a potential wait()/sleep()/park()
+  this->interrupt();
 }
 
 // External suspension mechanism.
@@ -2506,19 +2538,12 @@
 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
   assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
 
-  JavaThread *curJT = JavaThread::current();
-  bool do_self_suspend = thread->is_external_suspend();
-
-  assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
-
-  // If JNIEnv proxies are allowed, don't self-suspend if the target
-  // thread is not the current thread. In older versions of jdbx, jdbx
-  // threads could call into the VM with another thread's JNIEnv so we
-  // can be here operating on behalf of a suspended thread (4432884).
-  if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
+  assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
+
+  if (thread->is_external_suspend()) {
     thread->java_suspend_self_with_safepoint_check();
   } else {
-    SafepointMechanism::block_if_requested(curJT);
+    SafepointMechanism::block_if_requested(thread);
   }
 
   JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
@@ -2832,13 +2857,13 @@
 #endif // PRODUCT
 
 
-void JavaThread::deoptimize_marked_methods(bool in_handshake) {
+void JavaThread::deoptimize_marked_methods() {
   if (!has_last_Java_frame()) return;
   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
   StackFrameStream fst(this, UseBiasedLocking);
   for (; !fst.is_done(); fst.next()) {
     if (fst.current()->should_be_deoptimized()) {
-      Deoptimization::deoptimize(this, *fst.current(), fst.register_map(), in_handshake);
+      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
     }
   }
 }
@@ -2897,11 +2922,6 @@
     }
   }
 
-  // callee_target is never live across a gc point so NULL it here should
-  // it still contain a methdOop.
-
-  set_callee_target(NULL);
-
   assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
   // If we have deferred set_locals there might be oops waiting to be
   // written
@@ -2989,9 +3009,6 @@
 void JavaThread::print_thread_state_on(outputStream *st) const {
   st->print_cr("   JavaThread state: %s", _get_thread_state_name(_thread_state));
 };
-void JavaThread::print_thread_state() const {
-  print_thread_state_on(tty);
-}
 #endif // PRODUCT
 
 // Called by Threads::print() for VM_PrintThreads operation
@@ -3112,47 +3129,10 @@
   return name_str;
 }
 
-
-const char* JavaThread::get_threadgroup_name() const {
-  debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
-  oop thread_obj = threadObj();
-  if (thread_obj != NULL) {
-    oop thread_group = java_lang_Thread::threadGroup(thread_obj);
-    if (thread_group != NULL) {
-      // ThreadGroup.name can be null
-      return java_lang_ThreadGroup::name(thread_group);
-    }
-  }
-  return NULL;
-}
-
-const char* JavaThread::get_parent_name() const {
-  debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
-  oop thread_obj = threadObj();
-  if (thread_obj != NULL) {
-    oop thread_group = java_lang_Thread::threadGroup(thread_obj);
-    if (thread_group != NULL) {
-      oop parent = java_lang_ThreadGroup::parent(thread_group);
-      if (parent != NULL) {
-        // ThreadGroup.name can be null
-        return java_lang_ThreadGroup::name(parent);
-      }
-    }
-  }
-  return NULL;
-}
-
-ThreadPriority JavaThread::java_priority() const {
-  oop thr_oop = threadObj();
-  if (thr_oop == NULL) return NormPriority; // Bootstrapping
-  ThreadPriority priority = java_lang_Thread::priority(thr_oop);
-  assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
-  return priority;
-}
-
 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
 
   assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
+  assert(NoPriority <= prio && prio <= MaxPriority, "sanity check");
   // Link Java Thread object <-> C++ Thread
 
   // Get the C++ thread object (an oop) from the JNI handle (a jthread)
@@ -3253,7 +3233,7 @@
 
 void JavaThread::popframe_free_preserved_args() {
   assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
-  FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
+  FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args);
   _popframe_preserved_args = NULL;
   _popframe_preserved_args_size = 0;
 }
@@ -3292,23 +3272,6 @@
   virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
 };
 
-
-static void oops_print(frame* f, const RegisterMap *map) {
-  PrintAndVerifyOopClosure print;
-  f->print_value();
-  f->oops_do(&print, NULL, (RegisterMap*)map);
-}
-
-// Print our all the locations that contain oops and whether they are
-// valid or not.  This useful when trying to find the oldest frame
-// where an oop has gone bad since the frame walk is from youngest to
-// oldest.
-void JavaThread::trace_oops() {
-  tty->print_cr("[Trace oops]");
-  frames_do(oops_print);
-}
-
-
 #ifdef ASSERT
 // Print or validate the layout of stack frames
 void JavaThread::print_frame_layout(int depth, bool validate_only) {
@@ -3377,6 +3340,62 @@
   return NULL;
 }
 
+// java.lang.Thread.sleep support
+// Returns true if sleep time elapsed as expected, and false
+// if the thread was interrupted.
+bool JavaThread::sleep(jlong millis) {
+  assert(this == Thread::current(),  "thread consistency check");
+
+  ParkEvent * const slp = this->_SleepEvent;
+  // Because there can be races with thread interruption sending an unpark()
+  // to the event, we explicitly reset it here to avoid an immediate return.
+  // The actual interrupt state will be checked before we park().
+  slp->reset();
+  // Thread interruption establishes a happens-before ordering in the
+  // Java Memory Model, so we need to ensure we synchronize with the
+  // interrupt state.
+  OrderAccess::fence();
+
+  jlong prevtime = os::javaTimeNanos();
+
+  for (;;) {
+    // interruption has precedence over timing out
+    if (this->is_interrupted(true)) {
+      return false;
+    }
+
+    if (millis <= 0) {
+      return true;
+    }
+
+    {
+      ThreadBlockInVM tbivm(this);
+      OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
+
+      this->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition() or
+      // java_suspend_self() via check_and_wait_while_suspended()
+
+      slp->park(millis);
+
+      // were we externally suspended while we were waiting?
+      this->check_and_wait_while_suspended();
+    }
+
+    // Update elapsed time tracking
+    jlong newtime = os::javaTimeNanos();
+    if (newtime - prevtime < 0) {
+      // time moving backwards, should only happen if no monotonic clock
+      // not a guarantee() because JVM should not abort on kernel/glibc bugs
+      assert(!os::supports_monotonic_clock(),
+             "unexpected time moving backwards detected in JavaThread::sleep()");
+    } else {
+      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+    }
+    prevtime = newtime;
+  }
+}
+
 static void compiler_thread_entry(JavaThread* thread, TRAPS) {
   assert(thread->is_Compiler_thread(), "must be compiler thread");
   CompileBroker::compiler_thread_loop();
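
The loop in JavaThread::sleep() above re-checks interruption first, parks for the remaining time, then subtracts only the time that actually elapsed, so spurious wakeups never lengthen the sleep. The same skeleton in portable C++, assuming std::chrono::steady_clock in place of os::javaTimeNanos() and a caller-supplied interrupted() callback in place of is_interrupted(true):

#include <chrono>
#include <thread>

// Returns true if the full sleep elapsed, false if interrupted() fired.
bool sleep_millis(long long millis, bool (*interrupted)()) {
  using namespace std::chrono;
  long long prev = duration_cast<nanoseconds>(
      steady_clock::now().time_since_epoch()).count();
  for (;;) {
    if (interrupted()) return false;  // interruption beats timing out
    if (millis <= 0) return true;     // slept long enough

    std::this_thread::sleep_for(milliseconds(millis));  // the park(millis)

    long long now = duration_cast<nanoseconds>(
        steady_clock::now().time_since_epoch()).count();
    if (now - prev >= 0) {            // guard against a non-monotonic clock
      millis -= (now - prev) / 1000000;
    }
    prev = now;
  }
}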
@@ -3489,7 +3508,7 @@
 
 // All NonJavaThreads (i.e., every non-JavaThread in the system).
 void Threads::non_java_threads_do(ThreadClosure* tc) {
-  NoSafepointVerifier nsv(!SafepointSynchronize::is_at_safepoint(), false);
+  NoSafepointVerifier nsv;
   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
     tc->do_thread(njti.current());
   }
@@ -3650,6 +3669,9 @@
   initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
   initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
   initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
+
+  // Eager box cache initialization only if AOT is on and any library is loaded.
+  AOTLoader::initialize_box_caches(CHECK);
 }
 
 void Threads::initialize_jsr292_core_classes(TRAPS) {
@@ -3769,7 +3791,7 @@
 
 #if INCLUDE_JVMCI
   if (JVMCICounterSize > 0) {
-    JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
+    JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI);
     memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize);
   } else {
     JavaThread::_jvmci_old_thread_counters = NULL;
@@ -3826,7 +3848,7 @@
   // Create the VMThread
   { TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime));
 
-  VMThread::create();
+    VMThread::create();
     Thread* vmthread = VMThread::vm_thread();
 
     if (!os::create_thread(vmthread, os::vm_thread)) {
@@ -3912,6 +3934,7 @@
     Chunk::start_chunk_pool_cleaner_task();
   }
 
+
   // initialize compiler(s)
 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
 #if INCLUDE_JVMCI
@@ -4163,7 +4186,7 @@
   for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
     // CDS dumping does not support native JVMTI agent.
     // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
-    if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+    if (Arguments::is_dumping_archive()) {
       if(!agent->is_instrument_lib()) {
         vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name());
       } else if (!AllowArchivingWithJavaAgent) {
@@ -4444,8 +4467,8 @@
 
 void Threads::remove(JavaThread* p, bool is_daemon) {
 
-  // Reclaim the ObjectMonitors from the omInUseList and omFreeList of the moribund thread.
-  ObjectSynchronizer::omFlush(p);
+  // Reclaim the ObjectMonitors from the om_in_use_list and om_free_list of the moribund thread.
+  ObjectSynchronizer::om_flush(p);
 
   // Extra scope needed for Thread_lock, so we can check
   // that we do not remove thread without safepoint code notice
@@ -4932,65 +4955,6 @@
   }
 }
 
-void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
-  intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
-  if (w == 0) return;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
-    return;
-  }
-
-  ParkEvent * ReleaseAfter = NULL;
-  if (ev == NULL) {
-    ev = ReleaseAfter = ParkEvent::Allocate(NULL);
-  }
-  assert((intptr_t(ev) & LOCKBIT) == 0, "invariant");
-  for (;;) {
-    guarantee(ev->OnList == 0, "invariant");
-    int its = (os::is_MP() ? 100 : 0) + 1;
-
-    // Optional spin phase: spin-then-park strategy
-    while (--its >= 0) {
-      w = *Lock;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
-        if (ReleaseAfter != NULL) {
-          ParkEvent::Release(ReleaseAfter);
-        }
-        return;
-      }
-    }
-
-    ev->reset();
-    ev->OnList = intptr_t(Lock);
-    // The following fence() isn't _strictly necessary as the subsequent
-    // CAS() both serializes execution and ratifies the fetched *Lock value.
-    OrderAccess::fence();
-    for (;;) {
-      w = *Lock;
-      if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
-          ev->OnList = 0;
-          // We call ::Release while holding the outer lock, thus
-          // artificially lengthening the critical section.
-          // Consider deferring the ::Release() until the subsequent unlock(),
-          // after we've dropped the outer lock.
-          if (ReleaseAfter != NULL) {
-            ParkEvent::Release(ReleaseAfter);
-          }
-          return;
-        }
-        continue;      // Interference -- *Lock changed -- Just retry
-      }
-      assert(w & LOCKBIT, "invariant");
-      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-      if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
-    }
-
-    while (ev->OnList != 0) {
-      ev->park();
-    }
-  }
-}
-
 // Release() must extract a successor from the list and then wake that thread.
 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
 // similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based