src/hotspot/share/runtime/thread.cpp
branch:    JEP-349-branch
changeset: 58154:060d9d139109
parent:    57878:bffba8d6611a
parent:    58095:adc72cd1d1f2
child:     58156:68031e660872
comparing: 58101:84b0544833c4 with 58154:060d9d139109
    22  *
    22  *
    23  */
    23  */
    24 
    24 
    25 #include "precompiled.hpp"
    25 #include "precompiled.hpp"
    26 #include "jvm.h"
    26 #include "jvm.h"
       
    27 #include "aot/aotLoader.hpp"
    27 #include "classfile/classLoader.hpp"
    28 #include "classfile/classLoader.hpp"
    28 #include "classfile/javaClasses.hpp"
    29 #include "classfile/javaClasses.hpp"
    29 #include "classfile/moduleEntry.hpp"
    30 #include "classfile/moduleEntry.hpp"
    30 #include "classfile/systemDictionary.hpp"
    31 #include "classfile/systemDictionary.hpp"
    31 #include "classfile/vmSymbols.hpp"
    32 #include "classfile/vmSymbols.hpp"
   170 
   171 
   171 // ======= Thread ========
   172 // ======= Thread ========
   172 // Support for forcing alignment of thread objects for biased locking
   173 // Support for forcing alignment of thread objects for biased locking
   173 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   174 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   174   if (UseBiasedLocking) {
   175   if (UseBiasedLocking) {
   175     const int alignment = markOopDesc::biased_lock_alignment;
   176     const size_t alignment = markWord::biased_lock_alignment;
   176     size_t aligned_size = size + (alignment - sizeof(intptr_t));
   177     size_t aligned_size = size + (alignment - sizeof(intptr_t));
   177     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
   178     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
   178                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
   179                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
   179                                                          AllocFailStrategy::RETURN_NULL);
   180                                                          AllocFailStrategy::RETURN_NULL);
   180     void* aligned_addr     = align_up(real_malloc_addr, alignment);
   181     void* aligned_addr     = align_up(real_malloc_addr, alignment);
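
A minimal standalone sketch of the over-allocate-and-align pattern Thread::allocate uses above (helper names are illustrative, not HotSpot's). It assumes, as the original does, that malloc already returns storage aligned to at least sizeof(intptr_t), which is why only (alignment - sizeof(intptr_t)) extra bytes are requested:

    #include <cstdint>
    #include <cstdlib>

    // Round a pointer up to the next multiple of 'alignment' (a power of two).
    static void* align_up_ptr(void* p, std::size_t alignment) {
      std::uintptr_t v = reinterpret_cast<std::uintptr_t>(p);
      return reinterpret_cast<void*>((v + alignment - 1) & ~(std::uintptr_t)(alignment - 1));
    }

    // Over-allocate so an aligned object of 'size' bytes fits inside the block,
    // and return both the aligned pointer and the raw pointer needed by free().
    void* allocate_aligned(std::size_t size, std::size_t alignment, void** raw_out) {
      void* raw = std::malloc(size + (alignment - sizeof(std::intptr_t)));
      *raw_out = raw;
      return raw == nullptr ? nullptr : align_up_ptr(raw, alignment);
    }
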
   220   DEBUG_ONLY(_run_state = PRE_CALL_RUN;)
   221   DEBUG_ONLY(_run_state = PRE_CALL_RUN;)
   221 
   222 
   222   // stack and get_thread
   223   // stack and get_thread
   223   set_stack_base(NULL);
   224   set_stack_base(NULL);
   224   set_stack_size(0);
   225   set_stack_size(0);
   225   set_self_raw_id(0);
       
   226   set_lgrp_id(-1);
   226   set_lgrp_id(-1);
   227   DEBUG_ONLY(clear_suspendible_thread();)
   227   DEBUG_ONLY(clear_suspendible_thread();)
   228 
   228 
   229   // allocated data structures
   229   // allocated data structures
   230   set_osthread(NULL);
   230   set_osthread(NULL);
   247   // the handle mark links itself to last_handle_mark
   247   // the handle mark links itself to last_handle_mark
   248   new HandleMark(this);
   248   new HandleMark(this);
   249 
   249 
   250   // plain initialization
   250   // plain initialization
   251   debug_only(_owned_locks = NULL;)
   251   debug_only(_owned_locks = NULL;)
   252   debug_only(_allow_allocation_count = 0;)
   252   NOT_PRODUCT(_no_safepoint_count = 0;)
   253   NOT_PRODUCT(_allow_safepoint_count = 0;)
       
   254   NOT_PRODUCT(_skip_gcalot = false;)
   253   NOT_PRODUCT(_skip_gcalot = false;)
   255   _jvmti_env_iteration_count = 0;
   254   _jvmti_env_iteration_count = 0;
   256   set_allocated_bytes(0);
   255   set_allocated_bytes(0);
   257   _vm_operation_started_count = 0;
   256   _vm_operation_started_count = 0;
   258   _vm_operation_completed_count = 0;
   257   _vm_operation_completed_count = 0;
   259   _current_pending_monitor = NULL;
   258   _current_pending_monitor = NULL;
   260   _current_pending_monitor_is_from_java = true;
   259   _current_pending_monitor_is_from_java = true;
   261   _current_waiting_monitor = NULL;
   260   _current_waiting_monitor = NULL;
   262   _num_nested_signal = 0;
   261   _num_nested_signal = 0;
   263   omFreeList = NULL;
   262   om_free_list = NULL;
   264   omFreeCount = 0;
   263   om_free_count = 0;
   265   omFreeProvision = 32;
   264   om_free_provision = 32;
   266   omInUseList = NULL;
   265   om_in_use_list = NULL;
   267   omInUseCount = 0;
   266   om_in_use_count = 0;
   268 
   267 
   269 #ifdef ASSERT
   268 #ifdef ASSERT
   270   _visited_for_critical_count = false;
   269   _visited_for_critical_count = false;
   271 #endif
   270 #endif
   272 
   271 
   290   // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
   289   // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
   291   // we might instead use a stack of ParkEvents that we could provision on-demand.
   290   // we might instead use a stack of ParkEvents that we could provision on-demand.
   292   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
   291   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
   293   // and ::Release()
   292   // and ::Release()
   294   _ParkEvent   = ParkEvent::Allocate(this);
   293   _ParkEvent   = ParkEvent::Allocate(this);
   295   _SleepEvent  = ParkEvent::Allocate(this);
       
   296   _MuxEvent    = ParkEvent::Allocate(this);
   294   _MuxEvent    = ParkEvent::Allocate(this);
   297 
   295 
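
The CONSIDER comment above suggests keeping a stack of released ParkEvents as a cache to avoid repeated Allocate()/Release() calls. A small standalone sketch of that idea (illustrative only; this is not how HotSpot provisions its events, and the class name is hypothetical):

    #include <vector>

    // Stack-backed object cache: released objects are reused before allocating anew.
    template <typename T>
    class ObjectCache {
      std::vector<T*> _free;
     public:
      T* acquire() {
        if (!_free.empty()) {
          T* t = _free.back();
          _free.pop_back();
          return t;                // cache hit: reuse a previously released object
        }
        return new T();            // cache miss: fall back to a real allocation
      }
      void release(T* t) { _free.push_back(t); }
      ~ObjectCache() {
        for (T* t : _free) delete t;
      }
    };
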
   298 #ifdef CHECK_UNHANDLED_OOPS
   296 #ifdef CHECK_UNHANDLED_OOPS
   299   if (CheckUnhandledOops) {
   297   if (CheckUnhandledOops) {
   300     _unhandled_oops = new UnhandledOops(this);
   298     _unhandled_oops = new UnhandledOops(this);
   301   }
   299   }
   302 #endif // CHECK_UNHANDLED_OOPS
   300 #endif // CHECK_UNHANDLED_OOPS
   303 #ifdef ASSERT
   301 #ifdef ASSERT
   304   if (UseBiasedLocking) {
   302   if (UseBiasedLocking) {
   305     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
   303     assert(is_aligned(this, markWord::biased_lock_alignment), "forced alignment of thread object failed");
   306     assert(this == _real_malloc_address ||
   304     assert(this == _real_malloc_address ||
   307            this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
   305            this == align_up(_real_malloc_address, markWord::biased_lock_alignment),
   308            "bug in forced alignment of thread objects");
   306            "bug in forced alignment of thread objects");
   309   }
   307   }
   310 #endif // ASSERT
   308 #endif // ASSERT
   311 
   309 
   312   // Notify the barrier set that a thread is being created. The initial
   310   // Notify the barrier set that a thread is being created. The initial
   455   assert(last_handle_mark() == NULL, "check we have reached the end");
   453   assert(last_handle_mark() == NULL, "check we have reached the end");
   456 
   454 
   457   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
   455   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
   458   // We NULL out the fields for good hygiene.
   456   // We NULL out the fields for good hygiene.
   459   ParkEvent::Release(_ParkEvent); _ParkEvent   = NULL;
   457   ParkEvent::Release(_ParkEvent); _ParkEvent   = NULL;
   460   ParkEvent::Release(_SleepEvent); _SleepEvent  = NULL;
       
   461   ParkEvent::Release(_MuxEvent); _MuxEvent    = NULL;
   458   ParkEvent::Release(_MuxEvent); _MuxEvent    = NULL;
   462 
   459 
   463   delete handle_area();
   460   delete handle_area();
   464   delete metadata_handles();
   461   delete metadata_handles();
   465 
   462 
   857   }
   854   }
   858 
   855 
   859   return true;
   856   return true;
   860 }
   857 }
   861 
   858 
   862 #ifndef PRODUCT
       
   863 void JavaThread::record_jump(address target, address instr, const char* file,
       
   864                              int line) {
       
   865 
       
   866   // This should not need to be atomic as the only way for simultaneous
       
   867   // updates is via interrupts. Even then this should be rare or non-existent
       
   868   // and we don't care that much anyway.
       
   869 
       
   870   int index = _jmp_ring_index;
       
   871   _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1);
       
   872   _jmp_ring[index]._target = (intptr_t) target;
       
   873   _jmp_ring[index]._instruction = (intptr_t) instr;
       
   874   _jmp_ring[index]._file = file;
       
   875   _jmp_ring[index]._line = line;
       
   876 }
       
   877 #endif // PRODUCT
       
   878 
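
The removed record_jump() above relies on the standard power-of-two ring-buffer index update, (index + 1) & (size - 1). A self-contained sketch of that technique, with hypothetical names, assuming a power-of-two capacity:

    #include <cstddef>

    // Fixed-capacity ring buffer; the capacity must be a power of two so the
    // bitmask wraps the index without a modulo, as record_jump() did above.
    template <typename T, std::size_t N>
    class RingBuffer {
      static_assert((N & (N - 1)) == 0, "N must be a power of two");
      T _slots[N];
      std::size_t _index = 0;
     public:
      void record(const T& value) {
        std::size_t i = _index;
        _index = (i + 1) & (N - 1);      // advance and wrap
        _slots[i] = value;
      }
      const T& at(std::size_t i) const { return _slots[i & (N - 1)]; }
    };
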
       
   879 void Thread::interrupt(Thread* thread) {
   859 void Thread::interrupt(Thread* thread) {
   880   debug_only(check_for_dangling_thread_pointer(thread);)
   860   debug_only(check_for_dangling_thread_pointer(thread);)
   881   os::interrupt(thread);
   861   os::interrupt(thread);
   882 }
   862 }
   883 
   863 
   987   st->print(INTPTR_FORMAT, p2i(this));   // print address
   967   st->print(INTPTR_FORMAT, p2i(this));   // print address
   988 }
   968 }
   989 
   969 
   990 #ifdef ASSERT
   970 #ifdef ASSERT
   991 void Thread::print_owned_locks_on(outputStream* st) const {
   971 void Thread::print_owned_locks_on(outputStream* st) const {
   992   Monitor *cur = _owned_locks;
   972   Mutex* cur = _owned_locks;
   993   if (cur == NULL) {
   973   if (cur == NULL) {
   994     st->print(" (no locks) ");
   974     st->print(" (no locks) ");
   995   } else {
   975   } else {
   996     st->print_cr(" Locks owned:");
   976     st->print_cr(" Locks owned:");
   997     while (cur) {
   977     while (cur) {
   999       cur = cur->next();
   979       cur = cur->next();
  1000     }
   980     }
  1001   }
   981   }
  1002 }
   982 }
  1003 
   983 
  1004 static int ref_use_count  = 0;
   984 // Checks safepoint allowed and clears unhandled oops at potential safepoints.
  1005 
   985 void Thread::check_possible_safepoint() {
  1006 bool Thread::owns_locks_but_compiled_lock() const {
   986   if (!is_Java_thread()) return;
  1007   for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
   987 
  1008     if (cur != Compile_lock) return true;
   988   if (_no_safepoint_count > 0) {
  1009   }
   989     fatal("Possible safepoint reached by thread that does not allow it");
  1010   return false;
   990   }
  1011 }
   991 #ifdef CHECK_UNHANDLED_OOPS
  1012 
   992   // Clear unhandled oops in JavaThreads so we get a crash right away.
  1013 
   993   clear_unhandled_oops();
  1014 #endif
   994 #endif // CHECK_UNHANDLED_OOPS
  1015 
   995 }
  1016 #ifndef PRODUCT
       
  1017 
   996 
  1018 // The flag: potential_vm_operation notifies if this particular safepoint state could potentially
   997 // The flag: potential_vm_operation notifies if this particular safepoint state could potentially
  1019 // invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
   998 // invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
  1020 // no locks which allow_vm_block's are held
   999 // no locks which allow_vm_block's are held
  1021 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
  1000 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
  1022   // Check if current thread is allowed to block at a safepoint
  1001   if (!is_Java_thread()) return;
  1023   if (!(_allow_safepoint_count == 0)) {
  1002 
  1024     fatal("Possible safepoint reached by thread that does not allow it");
  1003   check_possible_safepoint();
  1025   }
  1004 
  1026   if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
  1005   if (((JavaThread*)this)->thread_state() != _thread_in_vm) {
  1027     fatal("LEAF method calling lock?");
  1006     fatal("LEAF method calling lock?");
  1028   }
  1007   }
  1029 
  1008 
  1030 #ifdef ASSERT
  1009   if (potential_vm_operation && !Universe::is_bootstrapping()) {
  1031   if (potential_vm_operation && is_Java_thread()
       
  1032       && !Universe::is_bootstrapping()) {
       
  1033     // Make sure we do not hold any locks that the VM thread also uses.
  1010     // Make sure we do not hold any locks that the VM thread also uses.
  1034     // This could potentially lead to deadlocks
  1011     // This could potentially lead to deadlocks
  1035     for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
  1012     for (Mutex* cur = _owned_locks; cur; cur = cur->next()) {
  1036       // Threads_lock is special, since the safepoint synchronization will not start before this is
  1013       // Threads_lock is special, since the safepoint synchronization will not start before this is
  1037       // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
  1014       // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
  1038       // since it is used to transfer control between JavaThreads and the VMThread
  1015       // since it is used to transfer control between JavaThreads and the VMThread
  1039       // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
  1016       // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
  1040       if ((cur->allow_vm_block() &&
  1017       if ((cur->allow_vm_block() &&
  1050 
  1027 
  1051   if (GCALotAtAllSafepoints) {
  1028   if (GCALotAtAllSafepoints) {
  1052     // We could enter a safepoint here and thus have a gc
  1029     // We could enter a safepoint here and thus have a gc
  1053     InterfaceSupport::check_gc_alot();
  1030     InterfaceSupport::check_gc_alot();
  1054   }
  1031   }
  1055 #endif
  1032 }
  1056 }
  1033 #endif // ASSERT
  1057 #endif
       
  1058 
  1034 
  1059 bool Thread::is_in_stack(address adr) const {
  1035 bool Thread::is_in_stack(address adr) const {
  1060   assert(Thread::current() == this, "is_in_stack can only be called from current thread");
  1036   assert(Thread::current() == this, "is_in_stack can only be called from current thread");
  1061   address end = os::current_stack_pointer();
  1037   address end = os::current_stack_pointer();
  1062   // Allow non Java threads to call this without stack_base
  1038   // Allow non Java threads to call this without stack_base
  1348   _processed_thread(NULL),
  1324   _processed_thread(NULL),
  1349   _gc_id(GCId::undefined())
  1325   _gc_id(GCId::undefined())
  1350 {}
  1326 {}
  1351 
  1327 
  1352 NamedThread::~NamedThread() {
  1328 NamedThread::~NamedThread() {
  1353   if (_name != NULL) {
  1329   FREE_C_HEAP_ARRAY(char, _name);
  1354     FREE_C_HEAP_ARRAY(char, _name);
       
  1355     _name = NULL;
       
  1356   }
       
  1357 }
  1330 }
  1358 
  1331 
  1359 void NamedThread::set_name(const char* format, ...) {
  1332 void NamedThread::set_name(const char* format, ...) {
  1360   guarantee(_name == NULL, "Only get to set name once.");
  1333   guarantee(_name == NULL, "Only get to set name once.");
  1361   _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
  1334   _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
  1362   guarantee(_name != NULL, "alloc failure");
       
  1363   va_list ap;
  1335   va_list ap;
  1364   va_start(ap, format);
  1336   va_start(ap, format);
  1365   jio_vsnprintf(_name, max_name_len, format, ap);
  1337   jio_vsnprintf(_name, max_name_len, format, ap);
  1366   va_end(ap);
  1338   va_end(ap);
  1367 }
  1339 }
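
NamedThread::set_name() above follows the usual one-shot printf-into-a-fixed-buffer idiom. A small self-contained version of the same pattern (class name and buffer size are illustrative, not HotSpot's):

    #include <cstdarg>
    #include <cstdio>

    class Named {
      static const int max_name_len = 64;
      char _name[max_name_len];
     public:
      Named() { _name[0] = '\0'; }
      // printf-style setter; vsnprintf always NUL-terminates and truncates
      // anything longer than the fixed buffer.
      void set_name(const char* format, ...) {
        va_list ap;
        va_start(ap, format);
        std::vsnprintf(_name, max_name_len, format, ap);
        va_end(ap);
      }
      const char* name() const { return _name; }
    };
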
  1589       }
  1561       }
  1590     }
  1562     }
  1591   }
  1563   }
  1592 }
  1564 }
  1593 
  1565 
       
  1566 // Attempt to enlarge the array for per thread counters.
       
  1567 jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) {
       
  1568   jlong* new_counters = NEW_C_HEAP_ARRAY(jlong, new_size, mtJVMCI);
       
  1569   if (new_counters == NULL) {
       
  1570     return NULL;
       
  1571   }
       
  1572   if (old_counters == NULL) {
       
  1573     old_counters = new_counters;
       
  1574     memset(old_counters, 0, sizeof(jlong) * new_size);
       
  1575   } else {
       
  1576     for (int i = 0; i < MIN2((int) current_size, new_size); i++) {
       
  1577       new_counters[i] = old_counters[i];
       
  1578     }
       
  1579     if (new_size > current_size) {
       
  1580       memset(new_counters + current_size, 0, sizeof(jlong) * (new_size - current_size));
       
  1581     }
       
  1582     FREE_C_HEAP_ARRAY(jlong, old_counters);
       
  1583   }
       
  1584   return new_counters;
       
  1585 }
       
  1586 
       
  1587 // Attempt to enlarge the array for per thread counters.
       
  1588 bool JavaThread::resize_counters(int current_size, int new_size) {
       
  1589   jlong* new_counters = resize_counters_array(_jvmci_counters, current_size, new_size);
       
  1590   if (new_counters == NULL) {
       
  1591     return false;
       
  1592   } else {
       
  1593     _jvmci_counters = new_counters;
       
  1594     return true;
       
  1595   }
       
  1596 }
       
  1597 
       
  1598 class VM_JVMCIResizeCounters : public VM_Operation {
       
  1599  private:
       
  1600   int _new_size;
       
  1601   bool _failed;
       
  1602 
       
  1603  public:
       
  1604   VM_JVMCIResizeCounters(int new_size) : _new_size(new_size), _failed(false) { }
       
  1605   VMOp_Type type()                  const        { return VMOp_JVMCIResizeCounters; }
       
  1606   bool allow_nested_vm_operations() const        { return true; }
       
  1607   void doit() {
       
  1608     // Resize the old thread counters array
       
  1609     jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size);
       
  1610     if (new_counters == NULL) {
       
  1611       _failed = true;
       
  1612       return;
       
  1613     } else {
       
  1614       JavaThread::_jvmci_old_thread_counters = new_counters;
       
  1615     }
       
  1616 
       
  1617     // Now resize each threads array
       
  1618     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *tp = jtiwh.next(); ) {
       
  1619       if (!tp->resize_counters(JVMCICounterSize, _new_size)) {
       
  1620         _failed = true;
       
  1621         break;
       
  1622       }
       
  1623     }
       
  1624     if (!_failed) {
       
  1625       JVMCICounterSize = _new_size;
       
  1626     }
       
  1627   }
       
  1628 
       
  1629   bool failed() { return _failed; }
       
  1630 };
       
  1631 
       
  1632 bool JavaThread::resize_all_jvmci_counters(int new_size) {
       
  1633   VM_JVMCIResizeCounters op(new_size);
       
  1634   VMThread::execute(&op);
       
  1635   return !op.failed();
       
  1636 }
       
  1637 
  1594 #endif // INCLUDE_JVMCI
  1638 #endif // INCLUDE_JVMCI
  1595 
  1639 
  1596 // A JavaThread is a normal Java thread
  1640 // A JavaThread is a normal Java thread
  1597 
  1641 
  1598 void JavaThread::initialize() {
  1642 void JavaThread::initialize() {
  1609   set_vframe_array_head(NULL);
  1653   set_vframe_array_head(NULL);
  1610   set_vframe_array_last(NULL);
  1654   set_vframe_array_last(NULL);
  1611   set_deferred_locals(NULL);
  1655   set_deferred_locals(NULL);
  1612   set_deopt_mark(NULL);
  1656   set_deopt_mark(NULL);
  1613   set_deopt_compiled_method(NULL);
  1657   set_deopt_compiled_method(NULL);
  1614   clear_must_deopt_id();
       
  1615   set_monitor_chunks(NULL);
  1658   set_monitor_chunks(NULL);
  1616   set_next(NULL);
       
  1617   _on_thread_list = false;
  1659   _on_thread_list = false;
  1618   set_thread_state(_thread_new);
  1660   set_thread_state(_thread_new);
  1619   _terminated = _not_terminated;
  1661   _terminated = _not_terminated;
  1620   _array_for_gc = NULL;
  1662   _array_for_gc = NULL;
  1621   _suspend_equivalent = false;
  1663   _suspend_equivalent = false;
  1628   _pending_failed_speculation = 0;
  1670   _pending_failed_speculation = 0;
  1629   _pending_transfer_to_interpreter = false;
  1671   _pending_transfer_to_interpreter = false;
  1630   _in_retryable_allocation = false;
  1672   _in_retryable_allocation = false;
  1631   _jvmci._alternate_call_target = NULL;
  1673   _jvmci._alternate_call_target = NULL;
  1632   assert(_jvmci._implicit_exception_pc == NULL, "must be");
  1674   assert(_jvmci._implicit_exception_pc == NULL, "must be");
       
  1675   _jvmci_counters = NULL;
  1633   if (JVMCICounterSize > 0) {
  1676   if (JVMCICounterSize > 0) {
  1634     _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
  1677     resize_counters(0, (int) JVMCICounterSize);
  1635     memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
       
  1636   } else {
       
  1637     _jvmci_counters = NULL;
       
  1638   }
  1678   }
  1639 #endif // INCLUDE_JVMCI
  1679 #endif // INCLUDE_JVMCI
  1640   _reserved_stack_activation = NULL;  // stack base not known yet
  1680   _reserved_stack_activation = NULL;  // stack base not known yet
  1641   (void)const_cast<oop&>(_exception_oop = oop(NULL));
  1681   (void)const_cast<oop&>(_exception_oop = oop(NULL));
  1642   _exception_pc  = 0;
  1682   _exception_pc  = 0;
  1647   _interp_only_mode    = 0;
  1687   _interp_only_mode    = 0;
  1648   _special_runtime_exit_condition = _no_async_condition;
  1688   _special_runtime_exit_condition = _no_async_condition;
  1649   _pending_async_exception = NULL;
  1689   _pending_async_exception = NULL;
  1650   _thread_stat = NULL;
  1690   _thread_stat = NULL;
  1651   _thread_stat = new ThreadStatistics();
  1691   _thread_stat = new ThreadStatistics();
  1652   _blocked_on_compilation = false;
       
  1653   _jni_active_critical = 0;
  1692   _jni_active_critical = 0;
  1654   _pending_jni_exception_check_fn = NULL;
  1693   _pending_jni_exception_check_fn = NULL;
  1655   _do_not_unlock_if_synchronized = false;
  1694   _do_not_unlock_if_synchronized = false;
  1656   _cached_monitor_info = NULL;
  1695   _cached_monitor_info = NULL;
  1657   _parker = Parker::Allocate(this);
  1696   _parker = Parker::Allocate(this);
  1658 
  1697   _SleepEvent = ParkEvent::Allocate(this);
  1659 #ifndef PRODUCT
       
  1660   _jmp_ring_index = 0;
       
  1661   for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
       
  1662     record_jump(NULL, NULL, NULL, 0);
       
  1663   }
       
  1664 #endif // PRODUCT
       
  1665 
       
  1666   // Setup safepoint state info for this thread
  1698   // Setup safepoint state info for this thread
  1667   ThreadSafepointState::create(this);
  1699   ThreadSafepointState::create(this);
  1668 
  1700 
  1669   debug_only(_java_call_counter = 0);
  1701   debug_only(_java_call_counter = 0);
  1670 
  1702 
  1733 
  1765 
  1734 void JavaThread::block_if_vm_exited() {
  1766 void JavaThread::block_if_vm_exited() {
  1735   if (_terminated == _vm_exited) {
  1767   if (_terminated == _vm_exited) {
  1736     // _vm_exited is set at safepoint, and Threads_lock is never released
  1768     // _vm_exited is set at safepoint, and Threads_lock is never released
  1737     // we will block here forever
  1769     // we will block here forever
  1738     Threads_lock->lock_without_safepoint_check();
  1770     Threads_lock->lock();
  1739     ShouldNotReachHere();
  1771     ShouldNotReachHere();
  1740   }
  1772   }
  1741 }
  1773 }
  1742 
  1774 
  1743 
  1775 
  1772 
  1804 
  1773   // JSR166 -- return the parker to the free list
  1805   // JSR166 -- return the parker to the free list
  1774   Parker::Release(_parker);
  1806   Parker::Release(_parker);
  1775   _parker = NULL;
  1807   _parker = NULL;
  1776 
  1808 
       
  1809   // Return the sleep event to the free list
       
  1810   ParkEvent::Release(_SleepEvent);
       
  1811   _SleepEvent = NULL;
       
  1812 
  1777   // Free any remaining  previous UnrollBlock
  1813   // Free any remaining  previous UnrollBlock
  1778   vframeArray* old_array = vframe_array_last();
  1814   vframeArray* old_array = vframe_array_last();
  1779 
  1815 
  1780   if (old_array != NULL) {
  1816   if (old_array != NULL) {
  1781     Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
  1817     Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
  2273          (!check_unsafe_error && condition == _async_unsafe_access_error),
  2309          (!check_unsafe_error && condition == _async_unsafe_access_error),
  2274          "must have handled the async condition, if no exception");
  2310          "must have handled the async condition, if no exception");
  2275 }
  2311 }
  2276 
  2312 
  2277 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
  2313 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
  2278   //
  2314 
  2279   // Check for pending external suspend.
  2315   // Check for pending external suspend.
  2280   // If JNIEnv proxies are allowed, don't self-suspend if the target
  2316   if (is_external_suspend_with_lock()) {
  2281   // thread is not the current thread. In older versions of jdbx, jdbx
       
  2282   // threads could call into the VM with another thread's JNIEnv so we
       
  2283   // can be here operating on behalf of a suspended thread (4432884).
       
  2284   bool do_self_suspend = is_external_suspend_with_lock();
       
  2285   if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
       
  2286     frame_anchor()->make_walkable(this);
  2317     frame_anchor()->make_walkable(this);
  2287     java_suspend_self_with_safepoint_check();
  2318     java_suspend_self_with_safepoint_check();
  2288   }
  2319   }
  2289 
  2320 
  2290   // We might be here for reasons in addition to the self-suspend request
  2321   // We might be here for reasons in addition to the self-suspend request
  2505 // Note only the ThreadInVMfromNative transition can call this function
  2536 // Note only the ThreadInVMfromNative transition can call this function
  2506 // directly and when thread state is _thread_in_native_trans
  2537 // directly and when thread state is _thread_in_native_trans
  2507 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
  2538 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
  2508   assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
  2539   assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
  2509 
  2540 
  2510   JavaThread *curJT = JavaThread::current();
  2541   assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
  2511   bool do_self_suspend = thread->is_external_suspend();
  2542 
  2512 
  2543   if (thread->is_external_suspend()) {
  2513   assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
       
  2514 
       
  2515   // If JNIEnv proxies are allowed, don't self-suspend if the target
       
  2516   // thread is not the current thread. In older versions of jdbx, jdbx
       
  2517   // threads could call into the VM with another thread's JNIEnv so we
       
  2518   // can be here operating on behalf of a suspended thread (4432884).
       
  2519   if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
       
  2520     thread->java_suspend_self_with_safepoint_check();
  2544     thread->java_suspend_self_with_safepoint_check();
  2521   } else {
  2545   } else {
  2522     SafepointMechanism::block_if_requested(curJT);
  2546     SafepointMechanism::block_if_requested(thread);
  2523   }
  2547   }
  2524 
  2548 
  2525   JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
  2549   JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
  2526 }
  2550 }
  2527 
  2551 
  2897     for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
  2921     for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
  2898       fst.current()->oops_do(f, cf, fst.register_map());
  2922       fst.current()->oops_do(f, cf, fst.register_map());
  2899     }
  2923     }
  2900   }
  2924   }
  2901 
  2925 
  2902   // callee_target is never live across a gc point so NULL it here should
       
  2903   // it still contain a methdOop.
       
  2904 
       
  2905   set_callee_target(NULL);
       
  2906 
       
  2907   assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
  2926   assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
  2908   // If we have deferred set_locals there might be oops waiting to be
  2927   // If we have deferred set_locals there might be oops waiting to be
  2909   // written
  2928   // written
  2910   GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
  2929   GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
  2911   if (list != NULL) {
  2930   if (list != NULL) {
  2989 
  3008 
  2990 #ifndef PRODUCT
  3009 #ifndef PRODUCT
  2991 void JavaThread::print_thread_state_on(outputStream *st) const {
  3010 void JavaThread::print_thread_state_on(outputStream *st) const {
  2992   st->print_cr("   JavaThread state: %s", _get_thread_state_name(_thread_state));
  3011   st->print_cr("   JavaThread state: %s", _get_thread_state_name(_thread_state));
  2993 };
  3012 };
  2994 void JavaThread::print_thread_state() const {
       
  2995   print_thread_state_on(tty);
       
  2996 }
       
  2997 #endif // PRODUCT
  3013 #endif // PRODUCT
  2998 
  3014 
  2999 // Called by Threads::print() for VM_PrintThreads operation
  3015 // Called by Threads::print() for VM_PrintThreads operation
  3000 void JavaThread::print_on(outputStream *st, bool print_extended_info) const {
  3016 void JavaThread::print_on(outputStream *st, bool print_extended_info) const {
  3001   st->print_raw("\"");
  3017   st->print_raw("\"");
  3112   }
  3128   }
  3113   assert(name_str != NULL, "unexpected NULL thread name");
  3129   assert(name_str != NULL, "unexpected NULL thread name");
  3114   return name_str;
  3130   return name_str;
  3115 }
  3131 }
  3116 
  3132 
  3117 
       
  3118 const char* JavaThread::get_threadgroup_name() const {
       
  3119   debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
       
  3120   oop thread_obj = threadObj();
       
  3121   if (thread_obj != NULL) {
       
  3122     oop thread_group = java_lang_Thread::threadGroup(thread_obj);
       
  3123     if (thread_group != NULL) {
       
  3124       // ThreadGroup.name can be null
       
  3125       return java_lang_ThreadGroup::name(thread_group);
       
  3126     }
       
  3127   }
       
  3128   return NULL;
       
  3129 }
       
  3130 
       
  3131 const char* JavaThread::get_parent_name() const {
       
  3132   debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
       
  3133   oop thread_obj = threadObj();
       
  3134   if (thread_obj != NULL) {
       
  3135     oop thread_group = java_lang_Thread::threadGroup(thread_obj);
       
  3136     if (thread_group != NULL) {
       
  3137       oop parent = java_lang_ThreadGroup::parent(thread_group);
       
  3138       if (parent != NULL) {
       
  3139         // ThreadGroup.name can be null
       
  3140         return java_lang_ThreadGroup::name(parent);
       
  3141       }
       
  3142     }
       
  3143   }
       
  3144   return NULL;
       
  3145 }
       
  3146 
       
  3147 ThreadPriority JavaThread::java_priority() const {
       
  3148   oop thr_oop = threadObj();
       
  3149   if (thr_oop == NULL) return NormPriority; // Bootstrapping
       
  3150   ThreadPriority priority = java_lang_Thread::priority(thr_oop);
       
  3151   assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
       
  3152   return priority;
       
  3153 }
       
  3154 
       
  3155 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
  3133 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
  3156 
  3134 
  3157   assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
  3135   assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
       
  3136   assert(NoPriority <= prio && prio <= MaxPriority, "sanity check");
  3158   // Link Java Thread object <-> C++ Thread
  3137   // Link Java Thread object <-> C++ Thread
  3159 
  3138 
  3160   // Get the C++ thread object (an oop) from the JNI handle (a jthread)
  3139   // Get the C++ thread object (an oop) from the JNI handle (a jthread)
  3161   // and put it into a new Handle.  The Handle "thread_oop" can then
  3140   // and put it into a new Handle.  The Handle "thread_oop" can then
  3162   // be used to pass the C++ thread object to other methods.
  3141   // be used to pass the C++ thread object to other methods.
  3253   return in_WordSize(sz / wordSize);
  3232   return in_WordSize(sz / wordSize);
  3254 }
  3233 }
  3255 
  3234 
  3256 void JavaThread::popframe_free_preserved_args() {
  3235 void JavaThread::popframe_free_preserved_args() {
  3257   assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
  3236   assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
  3258   FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
  3237   FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args);
  3259   _popframe_preserved_args = NULL;
  3238   _popframe_preserved_args = NULL;
  3260   _popframe_preserved_args_size = 0;
  3239   _popframe_preserved_args_size = 0;
  3261 }
  3240 }
  3262 
  3241 
  3263 #ifndef PRODUCT
  3242 #ifndef PRODUCT
  3292  public:
  3271  public:
  3293   virtual void do_oop(oop* p) { do_oop_work(p); }
  3272   virtual void do_oop(oop* p) { do_oop_work(p); }
  3294   virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
  3273   virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
  3295 };
  3274 };
  3296 
  3275 
  3297 
       
  3298 static void oops_print(frame* f, const RegisterMap *map) {
       
  3299   PrintAndVerifyOopClosure print;
       
  3300   f->print_value();
       
  3301   f->oops_do(&print, NULL, (RegisterMap*)map);
       
  3302 }
       
  3303 
       
  3304 // Print our all the locations that contain oops and whether they are
       
  3305 // valid or not.  This useful when trying to find the oldest frame
       
  3306 // where an oop has gone bad since the frame walk is from youngest to
       
  3307 // oldest.
       
  3308 void JavaThread::trace_oops() {
       
  3309   tty->print_cr("[Trace oops]");
       
  3310   frames_do(oops_print);
       
  3311 }
       
  3312 
       
  3313 
       
  3314 #ifdef ASSERT
  3276 #ifdef ASSERT
  3315 // Print or validate the layout of stack frames
  3277 // Print or validate the layout of stack frames
  3316 void JavaThread::print_frame_layout(int depth, bool validate_only) {
  3278 void JavaThread::print_frame_layout(int depth, bool validate_only) {
  3317   ResourceMark rm;
  3279   ResourceMark rm;
  3318   PRESERVE_EXCEPTION_MARK;
  3280   PRESERVE_EXCEPTION_MARK;
  3375   vfst.security_get_caller_frame(depth);
  3337   vfst.security_get_caller_frame(depth);
  3376   if (!vfst.at_end()) {
  3338   if (!vfst.at_end()) {
  3377     return vfst.method()->method_holder();
  3339     return vfst.method()->method_holder();
  3378   }
  3340   }
  3379   return NULL;
  3341   return NULL;
       
  3342 }
       
  3343 
       
  3344 // java.lang.Thread.sleep support
       
  3345 // Returns true if sleep time elapsed as expected, and false
       
  3346 // if the thread was interrupted.
       
  3347 bool JavaThread::sleep(jlong millis) {
       
  3348   assert(this == Thread::current(),  "thread consistency check");
       
  3349 
       
  3350   ParkEvent * const slp = this->_SleepEvent;
       
  3351   // Because there can be races with thread interruption sending an unpark()
       
  3352   // to the event, we explicitly reset it here to avoid an immediate return.
       
  3353   // The actual interrupt state will be checked before we park().
       
  3354   slp->reset();
       
  3355   // Thread interruption establishes a happens-before ordering in the
       
  3356   // Java Memory Model, so we need to ensure we synchronize with the
       
  3357   // interrupt state.
       
  3358   OrderAccess::fence();
       
  3359 
       
  3360   jlong prevtime = os::javaTimeNanos();
       
  3361 
       
  3362   for (;;) {
       
  3363     // interruption has precedence over timing out
       
  3364     if (os::is_interrupted(this, true)) {
       
  3365       return false;
       
  3366     }
       
  3367 
       
  3368     if (millis <= 0) {
       
  3369       return true;
       
  3370     }
       
  3371 
       
  3372     {
       
  3373       ThreadBlockInVM tbivm(this);
       
  3374       OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
       
  3375 
       
  3376       this->set_suspend_equivalent();
       
  3377       // cleared by handle_special_suspend_equivalent_condition() or
       
  3378       // java_suspend_self() via check_and_wait_while_suspended()
       
  3379 
       
  3380       slp->park(millis);
       
  3381 
       
  3382       // were we externally suspended while we were waiting?
       
  3383       this->check_and_wait_while_suspended();
       
  3384     }
       
  3385 
       
  3386     // Update elapsed time tracking
       
  3387     jlong newtime = os::javaTimeNanos();
       
  3388     if (newtime - prevtime < 0) {
       
  3389       // time moving backwards, should only happen if no monotonic clock
       
  3390       // not a guarantee() because JVM should not abort on kernel/glibc bugs
       
  3391       assert(!os::supports_monotonic_clock(),
       
  3392              "unexpected time moving backwards detected in os::sleep()");
       
  3393     } else {
       
  3394       millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
       
  3395     }
       
  3396     prevtime = newtime;
       
  3397   }
  3380 }
  3398 }
  3381 
  3399 
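
JavaThread::sleep() above parks in slices and recomputes the remaining time from the clock after every wakeup, so spurious wakeups and suspension never shorten the requested sleep. A standalone sketch of that loop using standard C++ primitives rather than HotSpot's ParkEvent (names are illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if the full sleep elapsed, false if 'interrupted' became true.
    bool interruptible_sleep_ms(long long millis, std::mutex& m,
                                std::condition_variable& cv, const bool& interrupted) {
      using namespace std::chrono;
      std::unique_lock<std::mutex> lock(m);
      auto prev = steady_clock::now();
      for (;;) {
        if (interrupted) {
          return false;                            // interruption beats timing out
        }
        if (millis <= 0) {
          return true;                             // requested time has elapsed
        }
        cv.wait_for(lock, milliseconds(millis));   // may wake early (spurious/notify)
        auto now = steady_clock::now();            // monotonic, never moves backwards
        millis -= duration_cast<milliseconds>(now - prev).count();
        prev = now;
      }
    }
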
  3382 static void compiler_thread_entry(JavaThread* thread, TRAPS) {
  3400 static void compiler_thread_entry(JavaThread* thread, TRAPS) {
  3383   assert(thread->is_Compiler_thread(), "must be compiler thread");
  3401   assert(thread->is_Compiler_thread(), "must be compiler thread");
  3384   CompileBroker::compiler_thread_loop();
  3402   CompileBroker::compiler_thread_loop();
  3455 //
  3473 //
  3456 // Note: The Threads_lock is currently more widely used than we
  3474 // Note: The Threads_lock is currently more widely used than we
  3457 // would like. We are actively migrating Threads_lock uses to other
  3475 // would like. We are actively migrating Threads_lock uses to other
  3458 // mechanisms in order to reduce Threads_lock contention.
  3476 // mechanisms in order to reduce Threads_lock contention.
  3459 
  3477 
  3460 JavaThread* Threads::_thread_list = NULL;
       
  3461 int         Threads::_number_of_threads = 0;
  3478 int         Threads::_number_of_threads = 0;
  3462 int         Threads::_number_of_non_daemon_threads = 0;
  3479 int         Threads::_number_of_non_daemon_threads = 0;
  3463 int         Threads::_return_code = 0;
  3480 int         Threads::_return_code = 0;
  3464 uintx       Threads::_thread_claim_token = 1; // Never zero.
  3481 uintx       Threads::_thread_claim_token = 1; // Never zero.
  3465 size_t      JavaThread::_stack_size_at_create = 0;
  3482 size_t      JavaThread::_stack_size_at_create = 0;
  3490 // All JavaThreads
  3507 // All JavaThreads
  3491 #define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(ThreadsSMRSupport::get_java_thread_list(), X)
  3508 #define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(ThreadsSMRSupport::get_java_thread_list(), X)
  3492 
  3509 
  3493 // All NonJavaThreads (i.e., every non-JavaThread in the system).
  3510 // All NonJavaThreads (i.e., every non-JavaThread in the system).
  3494 void Threads::non_java_threads_do(ThreadClosure* tc) {
  3511 void Threads::non_java_threads_do(ThreadClosure* tc) {
  3495   NoSafepointVerifier nsv(!SafepointSynchronize::is_at_safepoint(), false);
  3512   NoSafepointVerifier nsv;
  3496   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
  3513   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
  3497     tc->do_thread(njti.current());
  3514     tc->do_thread(njti.current());
  3498   }
  3515   }
  3499 }
  3516 }
  3500 
  3517 
  3651   initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK);
  3668   initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK);
  3652   initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK);
  3669   initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK);
  3653   initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
  3670   initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
  3654   initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
  3671   initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
  3655   initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
  3672   initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
       
  3673 
       
  3674   // Eager box cache initialization only if AOT is on and any library is loaded.
       
  3675   AOTLoader::initialize_box_caches(CHECK);
  3656 }
  3676 }
  3657 
  3677 
  3658 void Threads::initialize_jsr292_core_classes(TRAPS) {
  3678 void Threads::initialize_jsr292_core_classes(TRAPS) {
  3659   TraceTime timer("Initialize java.lang.invoke classes", TRACETIME_LOG(Info, startuptime));
  3679   TraceTime timer("Initialize java.lang.invoke classes", TRACETIME_LOG(Info, startuptime));
  3660 
  3680 
  3762   if (Arguments::init_agents_at_startup()) {
  3782   if (Arguments::init_agents_at_startup()) {
  3763     create_vm_init_agents();
  3783     create_vm_init_agents();
  3764   }
  3784   }
  3765 
  3785 
  3766   // Initialize Threads state
  3786   // Initialize Threads state
  3767   _thread_list = NULL;
       
  3768   _number_of_threads = 0;
  3787   _number_of_threads = 0;
  3769   _number_of_non_daemon_threads = 0;
  3788   _number_of_non_daemon_threads = 0;
  3770 
  3789 
  3771   // Initialize global data structures and create system classes in heap
  3790   // Initialize global data structures and create system classes in heap
  3772   vm_init_globals();
  3791   vm_init_globals();
  3773 
  3792 
  3774 #if INCLUDE_JVMCI
  3793 #if INCLUDE_JVMCI
  3775   if (JVMCICounterSize > 0) {
  3794   if (JVMCICounterSize > 0) {
  3776     JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
  3795     JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI);
  3777     memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize);
  3796     memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize);
  3778   } else {
  3797   } else {
  3779     JavaThread::_jvmci_old_thread_counters = NULL;
  3798     JavaThread::_jvmci_old_thread_counters = NULL;
  3780   }
  3799   }
  3781 #endif // INCLUDE_JVMCI
  3800 #endif // INCLUDE_JVMCI
  3914 
  3933 
  3915   if (CleanChunkPoolAsync) {
  3934   if (CleanChunkPoolAsync) {
  3916     Chunk::start_chunk_pool_cleaner_task();
  3935     Chunk::start_chunk_pool_cleaner_task();
  3917   }
  3936   }
  3918 
  3937 
       
  3938 
  3919   // initialize compiler(s)
  3939   // initialize compiler(s)
  3920 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
  3940 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
  3921 #if INCLUDE_JVMCI
  3941 #if INCLUDE_JVMCI
  3922   bool force_JVMCI_intialization = false;
  3942   bool force_JVMCI_intialization = false;
  3923   if (EnableJVMCI) {
  3943   if (EnableJVMCI) {
  3962 
  3982 
  3963   // cache the system and platform class loaders
  3983   // cache the system and platform class loaders
  3964   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
  3984   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
  3965 
  3985 
  3966 #if INCLUDE_CDS
  3986 #if INCLUDE_CDS
  3967   if (DumpSharedSpaces) {
  3987   // capture the module path info from the ModuleEntryTable
  3968     // capture the module path info from the ModuleEntryTable
  3988   ClassLoader::initialize_module_path(THREAD);
  3969     ClassLoader::initialize_module_path(THREAD);
       
  3970   }
       
  3971 #endif
  3989 #endif
  3972 
  3990 
  3973 #if INCLUDE_JVMCI
  3991 #if INCLUDE_JVMCI
  3974   if (force_JVMCI_intialization) {
  3992   if (force_JVMCI_intialization) {
  3975     JVMCI::initialize_compiler(CHECK_JNI_ERR);
  3993     JVMCI::initialize_compiler(CHECK_JNI_ERR);
  4167   JvmtiExport::enter_onload_phase();
  4185   JvmtiExport::enter_onload_phase();
  4168 
  4186 
  4169   for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
  4187   for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
  4170     // CDS dumping does not support native JVMTI agent.
  4188     // CDS dumping does not support native JVMTI agent.
  4171     // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
  4189     // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
  4172     if (DumpSharedSpaces) {
  4190     if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
  4173       if(!agent->is_instrument_lib()) {
  4191       if(!agent->is_instrument_lib()) {
  4174         vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name());
  4192         vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name());
  4175       } else if (!AllowArchivingWithJavaAgent) {
  4193       } else if (!AllowArchivingWithJavaAgent) {
  4176         vm_exit_during_cds_dumping(
  4194         vm_exit_during_cds_dumping(
  4177           "Must enable AllowArchivingWithJavaAgent in order to run Java agent during CDS dumping");
  4195           "Must enable AllowArchivingWithJavaAgent in order to run Java agent during CDS dumping");
  4423   // The threads lock must be owned at this point
  4441   // The threads lock must be owned at this point
  4424   assert(Threads_lock->owned_by_self(), "must have threads lock");
  4442   assert(Threads_lock->owned_by_self(), "must have threads lock");
  4425 
  4443 
  4426   BarrierSet::barrier_set()->on_thread_attach(p);
  4444   BarrierSet::barrier_set()->on_thread_attach(p);
  4427 
  4445 
  4428   p->set_next(_thread_list);
       
  4429   _thread_list = p;
       
  4430 
       
  4431   // Once a JavaThread is added to the Threads list, smr_delete() has
  4446   // Once a JavaThread is added to the Threads list, smr_delete() has
  4432   // to be used to delete it. Otherwise we can just delete it directly.
  4447   // to be used to delete it. Otherwise we can just delete it directly.
  4433   p->set_on_thread_list();
  4448   p->set_on_thread_list();
  4434 
  4449 
  4435   _number_of_threads++;
  4450   _number_of_threads++;
  4451   Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
  4466   Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
  4452 }
  4467 }
  4453 
  4468 
  4454 void Threads::remove(JavaThread* p, bool is_daemon) {
  4469 void Threads::remove(JavaThread* p, bool is_daemon) {
  4455 
  4470 
  4456   // Reclaim the ObjectMonitors from the omInUseList and omFreeList of the moribund thread.
  4471   // Reclaim the ObjectMonitors from the om_in_use_list and om_free_list of the moribund thread.
  4457   ObjectSynchronizer::omFlush(p);
  4472   ObjectSynchronizer::om_flush(p);
  4458 
  4473 
  4459   // Extra scope needed for Thread_lock, so we can check
  4474   // Extra scope needed for Thread_lock, so we can check
  4460   // that we do not remove thread without safepoint code notice
  4475   // that we do not remove thread without safepoint code notice
  4461   { MonitorLocker ml(Threads_lock);
  4476   { MonitorLocker ml(Threads_lock);
  4462 
  4477 
  4463     assert(ThreadsSMRSupport::get_java_thread_list()->includes(p), "p must be present");
  4478     assert(ThreadsSMRSupport::get_java_thread_list()->includes(p), "p must be present");
  4464 
  4479 
  4465     // Maintain fast thread list
  4480     // Maintain fast thread list
  4466     ThreadsSMRSupport::remove_thread(p);
  4481     ThreadsSMRSupport::remove_thread(p);
  4467 
       
  4468     JavaThread* current = _thread_list;
       
  4469     JavaThread* prev    = NULL;
       
  4470 
       
  4471     while (current != p) {
       
  4472       prev    = current;
       
  4473       current = current->next();
       
  4474     }
       
  4475 
       
  4476     if (prev) {
       
  4477       prev->set_next(current->next());
       
  4478     } else {
       
  4479       _thread_list = p->next();
       
  4480     }
       
  4481 
  4482 
  4482     _number_of_threads--;
  4483     _number_of_threads--;
  4483     if (!is_daemon) {
  4484     if (!is_daemon) {
  4484       _number_of_non_daemon_threads--;
  4485       _number_of_non_daemon_threads--;
  4485 
  4486