163 |
164 |
164 #endif // ndef DTRACE_ENABLED |
165 #endif // ndef DTRACE_ENABLED |
165 |
166 |
166 #ifndef USE_LIBRARY_BASED_TLS_ONLY |
167 #ifndef USE_LIBRARY_BASED_TLS_ONLY |
167 // Current thread is maintained as a thread-local variable |
168 // Current thread is maintained as a thread-local variable |
168 THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL; |
169 THREAD_LOCAL Thread* Thread::_thr_current = NULL; |
169 #endif |
170 #endif |
170 |
171 |
171 // ======= Thread ======== |
172 // ======= Thread ======== |
172 // Support for forcing alignment of thread objects for biased locking |
173 // Support for forcing alignment of thread objects for biased locking |
173 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) { |
174 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) { |
174 if (UseBiasedLocking) { |
175 if (UseBiasedLocking) { |
175 const int alignment = markOopDesc::biased_lock_alignment; |
176 const size_t alignment = markWord::biased_lock_alignment; |
176 size_t aligned_size = size + (alignment - sizeof(intptr_t)); |
177 size_t aligned_size = size + (alignment - sizeof(intptr_t)); |
177 void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC) |
178 void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC) |
178 : AllocateHeap(aligned_size, flags, CURRENT_PC, |
179 : AllocateHeap(aligned_size, flags, CURRENT_PC, |
179 AllocFailStrategy::RETURN_NULL); |
180 AllocFailStrategy::RETURN_NULL); |
180 void* aligned_addr = align_up(real_malloc_addr, alignment); |
181 void* aligned_addr = align_up(real_malloc_addr, alignment); |
247 // the handle mark links itself to last_handle_mark |
247 // the handle mark links itself to last_handle_mark |
248 new HandleMark(this); |
248 new HandleMark(this); |
249 |
249 |
250 // plain initialization |
250 // plain initialization |
251 debug_only(_owned_locks = NULL;) |
251 debug_only(_owned_locks = NULL;) |
252 debug_only(_allow_allocation_count = 0;) |
252 NOT_PRODUCT(_no_safepoint_count = 0;) |
253 NOT_PRODUCT(_allow_safepoint_count = 0;) |
|
254 NOT_PRODUCT(_skip_gcalot = false;) |
253 NOT_PRODUCT(_skip_gcalot = false;) |
255 _jvmti_env_iteration_count = 0; |
254 _jvmti_env_iteration_count = 0; |
256 set_allocated_bytes(0); |
255 set_allocated_bytes(0); |
257 _vm_operation_started_count = 0; |
256 _vm_operation_started_count = 0; |
258 _vm_operation_completed_count = 0; |
257 _vm_operation_completed_count = 0; |
259 _current_pending_monitor = NULL; |
258 _current_pending_monitor = NULL; |
260 _current_pending_monitor_is_from_java = true; |
259 _current_pending_monitor_is_from_java = true; |
261 _current_waiting_monitor = NULL; |
260 _current_waiting_monitor = NULL; |
|
261 _current_pending_raw_monitor = NULL; |
262 _num_nested_signal = 0; |
262 _num_nested_signal = 0; |
263 omFreeList = NULL; |
263 om_free_list = NULL; |
264 omFreeCount = 0; |
264 om_free_count = 0; |
265 omFreeProvision = 32; |
265 om_free_provision = 32; |
266 omInUseList = NULL; |
266 om_in_use_list = NULL; |
267 omInUseCount = 0; |
267 om_in_use_count = 0; |
268 |
268 |
269 #ifdef ASSERT |
269 #ifdef ASSERT |
270 _visited_for_critical_count = false; |
270 _visited_for_critical_count = false; |
271 #endif |
271 #endif |
272 |
272 |
290 // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents |
290 // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents |
291 // we might instead use a stack of ParkEvents that we could provision on-demand. |
291 // we might instead use a stack of ParkEvents that we could provision on-demand. |
292 // The stack would act as a cache to avoid calls to ParkEvent::Allocate() |
292 // The stack would act as a cache to avoid calls to ParkEvent::Allocate() |
293 // and ::Release() |
293 // and ::Release() |
294 _ParkEvent = ParkEvent::Allocate(this); |
294 _ParkEvent = ParkEvent::Allocate(this); |
295 _SleepEvent = ParkEvent::Allocate(this); |
|
296 _MuxEvent = ParkEvent::Allocate(this); |
295 _MuxEvent = ParkEvent::Allocate(this); |
297 |
296 |
298 #ifdef CHECK_UNHANDLED_OOPS |
297 #ifdef CHECK_UNHANDLED_OOPS |
299 if (CheckUnhandledOops) { |
298 if (CheckUnhandledOops) { |
300 _unhandled_oops = new UnhandledOops(this); |
299 _unhandled_oops = new UnhandledOops(this); |
301 } |
300 } |
302 #endif // CHECK_UNHANDLED_OOPS |
301 #endif // CHECK_UNHANDLED_OOPS |
303 #ifdef ASSERT |
302 #ifdef ASSERT |
304 if (UseBiasedLocking) { |
303 if (UseBiasedLocking) { |
305 assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed"); |
304 assert(is_aligned(this, markWord::biased_lock_alignment), "forced alignment of thread object failed"); |
306 assert(this == _real_malloc_address || |
305 assert(this == _real_malloc_address || |
307 this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment), |
306 this == align_up(_real_malloc_address, markWord::biased_lock_alignment), |
308 "bug in forced alignment of thread objects"); |
307 "bug in forced alignment of thread objects"); |
309 } |
308 } |
310 #endif // ASSERT |
309 #endif // ASSERT |
311 |
310 |
312 // Notify the barrier set that a thread is being created. The initial |
311 // Notify the barrier set that a thread is being created. The initial |
455 assert(last_handle_mark() == NULL, "check we have reached the end"); |
454 assert(last_handle_mark() == NULL, "check we have reached the end"); |
456 |
455 |
457 // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads. |
456 // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads. |
458 // We NULL out the fields for good hygiene. |
457 // We NULL out the fields for good hygiene. |
459 ParkEvent::Release(_ParkEvent); _ParkEvent = NULL; |
458 ParkEvent::Release(_ParkEvent); _ParkEvent = NULL; |
460 ParkEvent::Release(_SleepEvent); _SleepEvent = NULL; |
|
461 ParkEvent::Release(_MuxEvent); _MuxEvent = NULL; |
459 ParkEvent::Release(_MuxEvent); _MuxEvent = NULL; |
462 |
460 |
463 delete handle_area(); |
461 delete handle_area(); |
464 delete metadata_handles(); |
462 delete metadata_handles(); |
465 |
463 |
857 } |
855 } |
858 |
856 |
859 return true; |
857 return true; |
860 } |
858 } |
861 |
859 |
862 #ifndef PRODUCT |
|
863 void JavaThread::record_jump(address target, address instr, const char* file, |
|
864 int line) { |
|
865 |
|
866 // This should not need to be atomic as the only way for simultaneous |
|
867 // updates is via interrupts. Even then this should be rare or non-existent |
|
868 // and we don't care that much anyway. |
|
869 |
|
870 int index = _jmp_ring_index; |
|
871 _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1); |
|
872 _jmp_ring[index]._target = (intptr_t) target; |
|
873 _jmp_ring[index]._instruction = (intptr_t) instr; |
|
874 _jmp_ring[index]._file = file; |
|
875 _jmp_ring[index]._line = line; |
|
876 } |
|
877 #endif // PRODUCT |
|
878 |
|
879 void Thread::interrupt(Thread* thread) { |
|
880 debug_only(check_for_dangling_thread_pointer(thread);) |
|
881 os::interrupt(thread); |
|
882 } |
|
883 |
|
884 bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) { |
|
885 debug_only(check_for_dangling_thread_pointer(thread);) |
|
886 // Note: If clear_interrupted==false, this simply fetches and |
|
887 // returns the value of the field osthread()->interrupted(). |
|
888 return os::is_interrupted(thread, clear_interrupted); |
|
889 } |
|
890 |
|
891 |
|
892 // GC Support |
860 // GC Support |
893 bool Thread::claim_par_threads_do(uintx claim_token) { |
861 bool Thread::claim_par_threads_do(uintx claim_token) { |
894 uintx token = _threads_do_token; |
862 uintx token = _threads_do_token; |
895 if (token != claim_token) { |
863 if (token != claim_token) { |
896 uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token); |
864 uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token); |
999 cur = cur->next(); |
967 cur = cur->next(); |
1000 } |
968 } |
1001 } |
969 } |
1002 } |
970 } |
1003 |
971 |
1004 static int ref_use_count = 0; |
972 // Checks safepoint allowed and clears unhandled oops at potential safepoints. |
1005 |
973 void Thread::check_possible_safepoint() { |
1006 bool Thread::owns_locks_but_compiled_lock() const { |
974 if (!is_Java_thread()) return; |
1007 for (Monitor *cur = _owned_locks; cur; cur = cur->next()) { |
975 |
1008 if (cur != Compile_lock) return true; |
976 if (_no_safepoint_count > 0) { |
1009 } |
977 print_owned_locks(); |
1010 return false; |
|
1011 } |
|
1012 |
|
1013 |
|
1014 #endif |
|
1015 |
|
1016 #ifndef PRODUCT |
|
1017 |
|
1018 // The flag: potential_vm_operation notifies if this particular safepoint state could potentially |
|
1019 // invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that |
|
1020 // no locks which allow_vm_block's are held |
|
1021 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) { |
|
1022 // Check if current thread is allowed to block at a safepoint |
|
1023 if (!(_allow_safepoint_count == 0)) { |
|
1024 fatal("Possible safepoint reached by thread that does not allow it"); |
978 fatal("Possible safepoint reached by thread that does not allow it"); |
1025 } |
979 } |
1026 if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) { |
980 #ifdef CHECK_UNHANDLED_OOPS |
|
981 // Clear unhandled oops in JavaThreads so we get a crash right away. |
|
982 clear_unhandled_oops(); |
|
983 #endif // CHECK_UNHANDLED_OOPS |
|
984 } |
|
985 |
|
986 void Thread::check_for_valid_safepoint_state() { |
|
987 if (!is_Java_thread()) return; |
|
988 |
|
989 // Check NoSafepointVerifier, which is implied by locks taken that can be |
|
990 // shared with the VM thread. This makes sure that no locks with allow_vm_block |
|
991 // are held. |
|
992 check_possible_safepoint(); |
|
993 |
|
994 if (((JavaThread*)this)->thread_state() != _thread_in_vm) { |
1027 fatal("LEAF method calling lock?"); |
995 fatal("LEAF method calling lock?"); |
1028 } |
|
1029 |
|
1030 #ifdef ASSERT |
|
1031 if (potential_vm_operation && is_Java_thread() |
|
1032 && !Universe::is_bootstrapping()) { |
|
1033 // Make sure we do not hold any locks that the VM thread also uses. |
|
1034 // This could potentially lead to deadlocks |
|
1035 for (Monitor *cur = _owned_locks; cur; cur = cur->next()) { |
|
1036 // Threads_lock is special, since the safepoint synchronization will not start before this is |
|
1037 // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock, |
|
1038 // since it is used to transfer control between JavaThreads and the VMThread |
|
1039 // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first! |
|
1040 if ((cur->allow_vm_block() && |
|
1041 cur != Threads_lock && |
|
1042 cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation |
|
1043 cur != VMOperationRequest_lock && |
|
1044 cur != VMOperationQueue_lock) || |
|
1045 cur->rank() == Mutex::special) { |
|
1046 fatal("Thread holding lock at safepoint that vm can block on: %s", cur->name()); |
|
1047 } |
|
1048 } |
|
1049 } |
996 } |
1050 |
997 |
1051 if (GCALotAtAllSafepoints) { |
998 if (GCALotAtAllSafepoints) { |
1052 // We could enter a safepoint here and thus have a gc |
999 // We could enter a safepoint here and thus have a gc |
1053 InterfaceSupport::check_gc_alot(); |
1000 InterfaceSupport::check_gc_alot(); |
1054 } |
1001 } |
1055 #endif |
1002 } |
1056 } |
1003 #endif // ASSERT |
1057 #endif |
|
1058 |
1004 |
1059 bool Thread::is_in_stack(address adr) const { |
1005 bool Thread::is_in_stack(address adr) const { |
1060 assert(Thread::current() == this, "is_in_stack can only be called from current thread"); |
1006 assert(Thread::current() == this, "is_in_stack can only be called from current thread"); |
1061 address end = os::current_stack_pointer(); |
1007 address end = os::current_stack_pointer(); |
1062 // Allow non Java threads to call this without stack_base |
1008 // Allow non Java threads to call this without stack_base |
1348 _processed_thread(NULL), |
1294 _processed_thread(NULL), |
1349 _gc_id(GCId::undefined()) |
1295 _gc_id(GCId::undefined()) |
1350 {} |
1296 {} |
1351 |
1297 |
1352 NamedThread::~NamedThread() { |
1298 NamedThread::~NamedThread() { |
1353 if (_name != NULL) { |
1299 FREE_C_HEAP_ARRAY(char, _name); |
1354 FREE_C_HEAP_ARRAY(char, _name); |
|
1355 _name = NULL; |
|
1356 } |
|
1357 } |
1300 } |
1358 |
1301 |
1359 void NamedThread::set_name(const char* format, ...) { |
1302 void NamedThread::set_name(const char* format, ...) { |
1360 guarantee(_name == NULL, "Only get to set name once."); |
1303 guarantee(_name == NULL, "Only get to set name once."); |
1361 _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread); |
1304 _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread); |
1362 guarantee(_name != NULL, "alloc failure"); |
|
1363 va_list ap; |
1305 va_list ap; |
1364 va_start(ap, format); |
1306 va_start(ap, format); |
1365 jio_vsnprintf(_name, max_name_len, format, ap); |
1307 jio_vsnprintf(_name, max_name_len, format, ap); |
1366 va_end(ap); |
1308 va_end(ap); |
1367 } |
1309 } |
1589 } |
1531 } |
1590 } |
1532 } |
1591 } |
1533 } |
1592 } |
1534 } |
1593 |
1535 |
|
1536 // Attempt to enlarge the array for per thread counters. |
|
1537 jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) { |
|
1538 jlong* new_counters = NEW_C_HEAP_ARRAY(jlong, new_size, mtJVMCI); |
|
1539 if (old_counters == NULL) { |
|
1540 old_counters = new_counters; |
|
1541 memset(old_counters, 0, sizeof(jlong) * new_size); |
|
1542 } else { |
|
1543 for (int i = 0; i < MIN2((int) current_size, new_size); i++) { |
|
1544 new_counters[i] = old_counters[i]; |
|
1545 } |
|
1546 if (new_size > current_size) { |
|
1547 memset(new_counters + current_size, 0, sizeof(jlong) * (new_size - current_size)); |
|
1548 } |
|
1549 FREE_C_HEAP_ARRAY(jlong, old_counters); |
|
1550 } |
|
1551 return new_counters; |
|
1552 } |
|
1553 |
|
1554 // Attempt to enlarge the array for per thread counters. |
|
1555 void JavaThread::resize_counters(int current_size, int new_size) { |
|
1556 _jvmci_counters = resize_counters_array(_jvmci_counters, current_size, new_size); |
|
1557 } |
|
1558 |
|
1559 class VM_JVMCIResizeCounters : public VM_Operation { |
|
1560 private: |
|
1561 int _new_size; |
|
1562 |
|
1563 public: |
|
1564 VM_JVMCIResizeCounters(int new_size) : _new_size(new_size) { } |
|
1565 VMOp_Type type() const { return VMOp_JVMCIResizeCounters; } |
|
1566 bool allow_nested_vm_operations() const { return true; } |
|
1567 void doit() { |
|
1568 // Resize the old thread counters array |
|
1569 jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size); |
|
1570 JavaThread::_jvmci_old_thread_counters = new_counters; |
|
1571 |
|
1572 // Now resize each threads array |
|
1573 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *tp = jtiwh.next(); ) { |
|
1574 tp->resize_counters(JVMCICounterSize, _new_size); |
|
1575 } |
|
1576 JVMCICounterSize = _new_size; |
|
1577 } |
|
1578 }; |
|
1579 |
|
1580 void JavaThread::resize_all_jvmci_counters(int new_size) { |
|
1581 VM_JVMCIResizeCounters op(new_size); |
|
1582 VMThread::execute(&op); |
|
1583 } |
|
1584 |
1594 #endif // INCLUDE_JVMCI |
1585 #endif // INCLUDE_JVMCI |
1595 |
1586 |
1596 // A JavaThread is a normal Java thread |
1587 // A JavaThread is a normal Java thread |
1597 |
1588 |
1598 void JavaThread::initialize() { |
1589 void JavaThread::initialize() { |
1609 set_vframe_array_head(NULL); |
1600 set_vframe_array_head(NULL); |
1610 set_vframe_array_last(NULL); |
1601 set_vframe_array_last(NULL); |
1611 set_deferred_locals(NULL); |
1602 set_deferred_locals(NULL); |
1612 set_deopt_mark(NULL); |
1603 set_deopt_mark(NULL); |
1613 set_deopt_compiled_method(NULL); |
1604 set_deopt_compiled_method(NULL); |
1614 clear_must_deopt_id(); |
|
1615 set_monitor_chunks(NULL); |
1605 set_monitor_chunks(NULL); |
1616 _on_thread_list = false; |
1606 _on_thread_list = false; |
1617 set_thread_state(_thread_new); |
1607 set_thread_state(_thread_new); |
1618 _terminated = _not_terminated; |
1608 _terminated = _not_terminated; |
1619 _array_for_gc = NULL; |
1609 _array_for_gc = NULL; |
1627 _pending_failed_speculation = 0; |
1617 _pending_failed_speculation = 0; |
1628 _pending_transfer_to_interpreter = false; |
1618 _pending_transfer_to_interpreter = false; |
1629 _in_retryable_allocation = false; |
1619 _in_retryable_allocation = false; |
1630 _jvmci._alternate_call_target = NULL; |
1620 _jvmci._alternate_call_target = NULL; |
1631 assert(_jvmci._implicit_exception_pc == NULL, "must be"); |
1621 assert(_jvmci._implicit_exception_pc == NULL, "must be"); |
|
1622 _jvmci_counters = NULL; |
1632 if (JVMCICounterSize > 0) { |
1623 if (JVMCICounterSize > 0) { |
1633 _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal); |
1624 resize_counters(0, (int) JVMCICounterSize); |
1634 memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize); |
|
1635 } else { |
|
1636 _jvmci_counters = NULL; |
|
1637 } |
1625 } |
1638 #endif // INCLUDE_JVMCI |
1626 #endif // INCLUDE_JVMCI |
1639 _reserved_stack_activation = NULL; // stack base not known yet |
1627 _reserved_stack_activation = NULL; // stack base not known yet |
1640 (void)const_cast<oop&>(_exception_oop = oop(NULL)); |
1628 (void)const_cast<oop&>(_exception_oop = oop(NULL)); |
1641 _exception_pc = 0; |
1629 _exception_pc = 0; |
1646 _interp_only_mode = 0; |
1634 _interp_only_mode = 0; |
1647 _special_runtime_exit_condition = _no_async_condition; |
1635 _special_runtime_exit_condition = _no_async_condition; |
1648 _pending_async_exception = NULL; |
1636 _pending_async_exception = NULL; |
1649 _thread_stat = NULL; |
1637 _thread_stat = NULL; |
1650 _thread_stat = new ThreadStatistics(); |
1638 _thread_stat = new ThreadStatistics(); |
1651 _blocked_on_compilation = false; |
|
1652 _jni_active_critical = 0; |
1639 _jni_active_critical = 0; |
1653 _pending_jni_exception_check_fn = NULL; |
1640 _pending_jni_exception_check_fn = NULL; |
1654 _do_not_unlock_if_synchronized = false; |
1641 _do_not_unlock_if_synchronized = false; |
1655 _cached_monitor_info = NULL; |
1642 _cached_monitor_info = NULL; |
1656 _parker = Parker::Allocate(this); |
1643 _parker = Parker::Allocate(this); |
1657 |
1644 _SleepEvent = ParkEvent::Allocate(this); |
1658 #ifndef PRODUCT |
|
1659 _jmp_ring_index = 0; |
|
1660 for (int ji = 0; ji < jump_ring_buffer_size; ji++) { |
|
1661 record_jump(NULL, NULL, NULL, 0); |
|
1662 } |
|
1663 #endif // PRODUCT |
|
1664 |
|
1665 // Setup safepoint state info for this thread |
1645 // Setup safepoint state info for this thread |
1666 ThreadSafepointState::create(this); |
1646 ThreadSafepointState::create(this); |
1667 |
1647 |
1668 debug_only(_java_call_counter = 0); |
1648 debug_only(_java_call_counter = 0); |
1669 |
1649 |
1689 _jni_attach_state = _attaching_via_jni; |
1669 _jni_attach_state = _attaching_via_jni; |
1690 } else { |
1670 } else { |
1691 _jni_attach_state = _not_attaching_via_jni; |
1671 _jni_attach_state = _not_attaching_via_jni; |
1692 } |
1672 } |
1693 assert(deferred_card_mark().is_empty(), "Default MemRegion ctor"); |
1673 assert(deferred_card_mark().is_empty(), "Default MemRegion ctor"); |
|
1674 } |
|
1675 |
|
1676 |
|
1677 // interrupt support |
|
1678 |
|
1679 void JavaThread::interrupt() { |
|
1680 debug_only(check_for_dangling_thread_pointer(this);) |
|
1681 |
|
1682 if (!osthread()->interrupted()) { |
|
1683 osthread()->set_interrupted(true); |
|
1684 // More than one thread can get here with the same value of osthread, |
|
1685 // resulting in multiple notifications. We do, however, want the store |
|
1686 // to interrupted() to be visible to other threads before we execute unpark(). |
|
1687 OrderAccess::fence(); |
|
1688 |
|
1689 // For JavaThread::sleep. Historically we only unpark if changing to the interrupted |
|
1690 // state, in contrast to the other events below. Not clear exactly why. |
|
1691 _SleepEvent->unpark(); |
|
1692 } |
|
1693 |
|
1694 // For JSR166. Unpark even if interrupt status already was set. |
|
1695 parker()->unpark(); |
|
1696 |
|
1697 // For ObjectMonitor and JvmtiRawMonitor |
|
1698 _ParkEvent->unpark(); |
|
1699 } |
|
1700 |
|
1701 |
|
1702 bool JavaThread::is_interrupted(bool clear_interrupted) { |
|
1703 debug_only(check_for_dangling_thread_pointer(this);) |
|
1704 bool interrupted = osthread()->interrupted(); |
|
1705 |
|
1706 // NOTE that since there is no "lock" around the interrupt and |
|
1707 // is_interrupted operations, there is the possibility that the |
|
1708 // interrupted flag (in osThread) will be "false" but that the |
|
1709 // low-level events will be in the signaled state. This is |
|
1710 // intentional. The effect of this is that Object.wait() and |
|
1711 // LockSupport.park() will appear to have a spurious wakeup, which |
|
1712 // is allowed and not harmful, and the possibility is so rare that |
|
1713 // it is not worth the added complexity to add yet another lock. |
|
1714 // For the sleep event an explicit reset is performed on entry |
|
1715 // to JavaThread::sleep, so there is no early return. It has also been |
|
1716 // recommended not to put the interrupted flag into the "event" |
|
1717 // structure because it hides the issue. |
|
1718 if (interrupted && clear_interrupted) { |
|
1719 osthread()->set_interrupted(false); |
|
1720 // consider thread->_SleepEvent->reset() ... optional optimization |
|
1721 } |
|
1722 |
|
1723 return interrupted; |
1694 } |
1724 } |
1695 |
1725 |
1696 bool JavaThread::reguard_stack(address cur_sp) { |
1726 bool JavaThread::reguard_stack(address cur_sp) { |
1697 if (_stack_guard_state != stack_guard_yellow_reserved_disabled |
1727 if (_stack_guard_state != stack_guard_yellow_reserved_disabled |
1698 && _stack_guard_state != stack_guard_reserved_disabled) { |
1728 && _stack_guard_state != stack_guard_reserved_disabled) { |
1731 |
1761 |
1732 |
1762 |
1733 void JavaThread::block_if_vm_exited() { |
1763 void JavaThread::block_if_vm_exited() { |
1734 if (_terminated == _vm_exited) { |
1764 if (_terminated == _vm_exited) { |
1735 // _vm_exited is set at safepoint, and Threads_lock is never released |
1765 // _vm_exited is set at safepoint, and Threads_lock is never released |
1736 // we will block here forever |
1766 // we will block here forever. |
1737 Threads_lock->lock_without_safepoint_check(); |
1767 // Here we can be doing a jump from a safe state to an unsafe state without |
|
1768 // proper transition, but it happens after the final safepoint has begun. |
|
1769 set_thread_state(_thread_in_vm); |
|
1770 Threads_lock->lock(); |
1738 ShouldNotReachHere(); |
1771 ShouldNotReachHere(); |
1739 } |
1772 } |
1740 } |
1773 } |
1741 |
1774 |
1742 |
1775 |
1771 |
1804 |
1772 // JSR166 -- return the parker to the free list |
1805 // JSR166 -- return the parker to the free list |
1773 Parker::Release(_parker); |
1806 Parker::Release(_parker); |
1774 _parker = NULL; |
1807 _parker = NULL; |
1775 |
1808 |
|
1809 // Return the sleep event to the free list |
|
1810 ParkEvent::Release(_SleepEvent); |
|
1811 _SleepEvent = NULL; |
|
1812 |
1776 // Free any remaining previous UnrollBlock |
1813 // Free any remaining previous UnrollBlock |
1777 vframeArray* old_array = vframe_array_last(); |
1814 vframeArray* old_array = vframe_array_last(); |
1778 |
1815 |
1779 if (old_array != NULL) { |
1816 if (old_array != NULL) { |
1780 Deoptimization::UnrollBlock* old_info = old_array->unroll_block(); |
1817 Deoptimization::UnrollBlock* old_info = old_array->unroll_block(); |
2272 (!check_unsafe_error && condition == _async_unsafe_access_error), |
2309 (!check_unsafe_error && condition == _async_unsafe_access_error), |
2273 "must have handled the async condition, if no exception"); |
2310 "must have handled the async condition, if no exception"); |
2274 } |
2311 } |
2275 |
2312 |
2276 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) { |
2313 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) { |
2277 // |
2314 |
2278 // Check for pending external suspend. |
2315 // Check for pending external suspend. |
2279 // If JNIEnv proxies are allowed, don't self-suspend if the target |
2316 if (is_external_suspend_with_lock()) { |
2280 // thread is not the current thread. In older versions of jdbx, jdbx |
|
2281 // threads could call into the VM with another thread's JNIEnv so we |
|
2282 // can be here operating on behalf of a suspended thread (4432884). |
|
2283 bool do_self_suspend = is_external_suspend_with_lock(); |
|
2284 if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) { |
|
2285 frame_anchor()->make_walkable(this); |
2317 frame_anchor()->make_walkable(this); |
2286 java_suspend_self_with_safepoint_check(); |
2318 java_suspend_self_with_safepoint_check(); |
2287 } |
2319 } |
2288 |
2320 |
2289 // We might be here for reasons in addition to the self-suspend request |
2321 // We might be here for reasons in addition to the self-suspend request |
2504 // Note only the ThreadInVMfromNative transition can call this function |
2536 // Note only the ThreadInVMfromNative transition can call this function |
2505 // directly and when thread state is _thread_in_native_trans |
2537 // directly and when thread state is _thread_in_native_trans |
2506 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) { |
2538 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) { |
2507 assert(thread->thread_state() == _thread_in_native_trans, "wrong state"); |
2539 assert(thread->thread_state() == _thread_in_native_trans, "wrong state"); |
2508 |
2540 |
2509 JavaThread *curJT = JavaThread::current(); |
2541 assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition"); |
2510 bool do_self_suspend = thread->is_external_suspend(); |
2542 |
2511 |
2543 if (thread->is_external_suspend()) { |
2512 assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition"); |
|
2513 |
|
2514 // If JNIEnv proxies are allowed, don't self-suspend if the target |
|
2515 // thread is not the current thread. In older versions of jdbx, jdbx |
|
2516 // threads could call into the VM with another thread's JNIEnv so we |
|
2517 // can be here operating on behalf of a suspended thread (4432884). |
|
2518 if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) { |
|
2519 thread->java_suspend_self_with_safepoint_check(); |
2544 thread->java_suspend_self_with_safepoint_check(); |
2520 } else { |
2545 } else { |
2521 SafepointMechanism::block_if_requested(curJT); |
2546 SafepointMechanism::block_if_requested(thread); |
2522 } |
2547 } |
2523 |
2548 |
2524 JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);) |
2549 JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);) |
2525 } |
2550 } |
2526 |
2551 |
2830 } |
2855 } |
2831 } |
2856 } |
2832 #endif // PRODUCT |
2857 #endif // PRODUCT |
2833 |
2858 |
2834 |
2859 |
2835 void JavaThread::deoptimize_marked_methods(bool in_handshake) { |
2860 void JavaThread::deoptimize_marked_methods() { |
2836 if (!has_last_Java_frame()) return; |
2861 if (!has_last_Java_frame()) return; |
2837 // BiasedLocking needs an updated RegisterMap for the revoke monitors pass |
2862 // BiasedLocking needs an updated RegisterMap for the revoke monitors pass |
2838 StackFrameStream fst(this, UseBiasedLocking); |
2863 StackFrameStream fst(this, UseBiasedLocking); |
2839 for (; !fst.is_done(); fst.next()) { |
2864 for (; !fst.is_done(); fst.next()) { |
2840 if (fst.current()->should_be_deoptimized()) { |
2865 if (fst.current()->should_be_deoptimized()) { |
2841 Deoptimization::deoptimize(this, *fst.current(), fst.register_map(), in_handshake); |
2866 Deoptimization::deoptimize(this, *fst.current(), fst.register_map()); |
2842 } |
2867 } |
2843 } |
2868 } |
2844 } |
2869 } |
2845 |
2870 |
2846 // If the caller is a NamedThread, then remember, in the current scope, |
2871 // If the caller is a NamedThread, then remember, in the current scope, |
2894 // Traverse the execution stack |
2919 // Traverse the execution stack |
2895 for (StackFrameStream fst(this); !fst.is_done(); fst.next()) { |
2920 for (StackFrameStream fst(this); !fst.is_done(); fst.next()) { |
2896 fst.current()->oops_do(f, cf, fst.register_map()); |
2921 fst.current()->oops_do(f, cf, fst.register_map()); |
2897 } |
2922 } |
2898 } |
2923 } |
2899 |
|
2900 // callee_target is never live across a gc point so NULL it here should |
|
2901 // it still contain a methdOop. |
|
2902 |
|
2903 set_callee_target(NULL); |
|
2904 |
2924 |
2905 assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!"); |
2925 assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!"); |
2906 // If we have deferred set_locals there might be oops waiting to be |
2926 // If we have deferred set_locals there might be oops waiting to be |
2907 // written |
2927 // written |
2908 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals(); |
2928 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals(); |
2987 |
3007 |
2988 #ifndef PRODUCT |
3008 #ifndef PRODUCT |
2989 void JavaThread::print_thread_state_on(outputStream *st) const { |
3009 void JavaThread::print_thread_state_on(outputStream *st) const { |
2990 st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state)); |
3010 st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state)); |
2991 }; |
3011 }; |
2992 void JavaThread::print_thread_state() const { |
|
2993 print_thread_state_on(tty); |
|
2994 } |
|
2995 #endif // PRODUCT |
3012 #endif // PRODUCT |
2996 |
3013 |
2997 // Called by Threads::print() for VM_PrintThreads operation |
3014 // Called by Threads::print() for VM_PrintThreads operation |
2998 void JavaThread::print_on(outputStream *st, bool print_extended_info) const { |
3015 void JavaThread::print_on(outputStream *st, bool print_extended_info) const { |
2999 st->print_raw("\""); |
3016 st->print_raw("\""); |
3110 } |
3127 } |
3111 assert(name_str != NULL, "unexpected NULL thread name"); |
3128 assert(name_str != NULL, "unexpected NULL thread name"); |
3112 return name_str; |
3129 return name_str; |
3113 } |
3130 } |
3114 |
3131 |
3115 |
|
3116 const char* JavaThread::get_threadgroup_name() const { |
|
3117 debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);) |
|
3118 oop thread_obj = threadObj(); |
|
3119 if (thread_obj != NULL) { |
|
3120 oop thread_group = java_lang_Thread::threadGroup(thread_obj); |
|
3121 if (thread_group != NULL) { |
|
3122 // ThreadGroup.name can be null |
|
3123 return java_lang_ThreadGroup::name(thread_group); |
|
3124 } |
|
3125 } |
|
3126 return NULL; |
|
3127 } |
|
3128 |
|
3129 const char* JavaThread::get_parent_name() const { |
|
3130 debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);) |
|
3131 oop thread_obj = threadObj(); |
|
3132 if (thread_obj != NULL) { |
|
3133 oop thread_group = java_lang_Thread::threadGroup(thread_obj); |
|
3134 if (thread_group != NULL) { |
|
3135 oop parent = java_lang_ThreadGroup::parent(thread_group); |
|
3136 if (parent != NULL) { |
|
3137 // ThreadGroup.name can be null |
|
3138 return java_lang_ThreadGroup::name(parent); |
|
3139 } |
|
3140 } |
|
3141 } |
|
3142 return NULL; |
|
3143 } |
|
3144 |
|
3145 ThreadPriority JavaThread::java_priority() const { |
|
3146 oop thr_oop = threadObj(); |
|
3147 if (thr_oop == NULL) return NormPriority; // Bootstrapping |
|
3148 ThreadPriority priority = java_lang_Thread::priority(thr_oop); |
|
3149 assert(MinPriority <= priority && priority <= MaxPriority, "sanity check"); |
|
3150 return priority; |
|
3151 } |
|
3152 |
|
3153 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) { |
3132 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) { |
3154 |
3133 |
3155 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); |
3134 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); |
|
3135 assert(NoPriority <= prio && prio <= MaxPriority, "sanity check"); |
3156 // Link Java Thread object <-> C++ Thread |
3136 // Link Java Thread object <-> C++ Thread |
3157 |
3137 |
3158 // Get the C++ thread object (an oop) from the JNI handle (a jthread) |
3138 // Get the C++ thread object (an oop) from the JNI handle (a jthread) |
3159 // and put it into a new Handle. The Handle "thread_oop" can then |
3139 // and put it into a new Handle. The Handle "thread_oop" can then |
3160 // be used to pass the C++ thread object to other methods. |
3140 // be used to pass the C++ thread object to other methods. |
3251 return in_WordSize(sz / wordSize); |
3231 return in_WordSize(sz / wordSize); |
3252 } |
3232 } |
3253 |
3233 |
3254 void JavaThread::popframe_free_preserved_args() { |
3234 void JavaThread::popframe_free_preserved_args() { |
3255 assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice"); |
3235 assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice"); |
3256 FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args); |
3236 FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args); |
3257 _popframe_preserved_args = NULL; |
3237 _popframe_preserved_args = NULL; |
3258 _popframe_preserved_args_size = 0; |
3238 _popframe_preserved_args_size = 0; |
3259 } |
3239 } |
3260 |
3240 |
3261 #ifndef PRODUCT |
3241 #ifndef PRODUCT |
3290 public: |
3270 public: |
3291 virtual void do_oop(oop* p) { do_oop_work(p); } |
3271 virtual void do_oop(oop* p) { do_oop_work(p); } |
3292 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
3272 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
3293 }; |
3273 }; |
3294 |
3274 |
3295 |
|
3296 static void oops_print(frame* f, const RegisterMap *map) { |
|
3297 PrintAndVerifyOopClosure print; |
|
3298 f->print_value(); |
|
3299 f->oops_do(&print, NULL, (RegisterMap*)map); |
|
3300 } |
|
3301 |
|
// Print out all the locations that contain oops and whether they are
// valid or not. This is useful when trying to find the oldest frame
// where an oop has gone bad, since the frame walk is from youngest to
// oldest.
|
3306 void JavaThread::trace_oops() { |
|
3307 tty->print_cr("[Trace oops]"); |
|
3308 frames_do(oops_print); |
|
3309 } |
|
3310 |
|
3311 |
|
3312 #ifdef ASSERT |
3275 #ifdef ASSERT |
3313 // Print or validate the layout of stack frames |
3276 // Print or validate the layout of stack frames |
3314 void JavaThread::print_frame_layout(int depth, bool validate_only) { |
3277 void JavaThread::print_frame_layout(int depth, bool validate_only) { |
3315 ResourceMark rm; |
3278 ResourceMark rm; |
3316 PRESERVE_EXCEPTION_MARK; |
3279 PRESERVE_EXCEPTION_MARK; |
3373 vfst.security_get_caller_frame(depth); |
3336 vfst.security_get_caller_frame(depth); |
3374 if (!vfst.at_end()) { |
3337 if (!vfst.at_end()) { |
3375 return vfst.method()->method_holder(); |
3338 return vfst.method()->method_holder(); |
3376 } |
3339 } |
3377 return NULL; |
3340 return NULL; |
|
3341 } |
|
3342 |
|
3343 // java.lang.Thread.sleep support |
|
3344 // Returns true if sleep time elapsed as expected, and false |
|
3345 // if the thread was interrupted. |
|
// java.lang.Thread.sleep support
// Returns true if sleep time elapsed as expected, and false
// if the thread was interrupted.
bool JavaThread::sleep(jlong millis) {
  assert(this == Thread::current(), "thread consistency check");

  ParkEvent * const slp = this->_SleepEvent;
  // Because there can be races with thread interruption sending an unpark()
  // to the event, we explicitly reset it here to avoid an immediate return.
  // The actual interrupt state will be checked before we park().
  slp->reset();
  // Thread interruption establishes a happens-before ordering in the
  // Java Memory Model, so we need to ensure we synchronize with the
  // interrupt state.
  OrderAccess::fence();

  jlong prevtime = os::javaTimeNanos();

  for (;;) {
    // interruption has precedence over timing out
    if (this->is_interrupted(true)) {
      return false;
    }

    // Remaining time used up (or a non-positive sleep was requested):
    // the sleep has completed normally.
    if (millis <= 0) {
      return true;
    }

    {
      // Transition to blocked while parked so safepoints can proceed.
      ThreadBlockInVM tbivm(this);
      OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);

      this->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self() via check_and_wait_while_suspended()

      slp->park(millis);

      // were we externally suspended while we were waiting?
      this->check_and_wait_while_suspended();
    }

    // Update elapsed time tracking
    jlong newtime = os::javaTimeNanos();
    if (newtime - prevtime < 0) {
      // time moving backwards, should only happen if no monotonic clock
      // not a guarantee() because JVM should not abort on kernel/glibc bugs
      assert(!os::supports_monotonic_clock(),
             "unexpected time moving backwards detected in JavaThread::sleep()");
    } else {
      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
    }
    prevtime = newtime;
  }
}
3379 |
3398 |
3380 static void compiler_thread_entry(JavaThread* thread, TRAPS) { |
3399 static void compiler_thread_entry(JavaThread* thread, TRAPS) { |
3381 assert(thread->is_Compiler_thread(), "must be compiler thread"); |
3400 assert(thread->is_Compiler_thread(), "must be compiler thread"); |
3382 CompileBroker::compiler_thread_loop(); |
3401 CompileBroker::compiler_thread_loop(); |
3487 // All JavaThreads |
3506 // All JavaThreads |
3488 #define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(ThreadsSMRSupport::get_java_thread_list(), X) |
3507 #define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(ThreadsSMRSupport::get_java_thread_list(), X) |
3489 |
3508 |
3490 // All NonJavaThreads (i.e., every non-JavaThread in the system). |
3509 // All NonJavaThreads (i.e., every non-JavaThread in the system). |
3491 void Threads::non_java_threads_do(ThreadClosure* tc) { |
3510 void Threads::non_java_threads_do(ThreadClosure* tc) { |
3492 NoSafepointVerifier nsv(!SafepointSynchronize::is_at_safepoint(), false); |
3511 NoSafepointVerifier nsv; |
3493 for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) { |
3512 for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) { |
3494 tc->do_thread(njti.current()); |
3513 tc->do_thread(njti.current()); |
3495 } |
3514 } |
3496 } |
3515 } |
3497 |
3516 |
3648 initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK); |
3667 initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK); |
3649 initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK); |
3668 initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK); |
3650 initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK); |
3669 initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK); |
3651 initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK); |
3670 initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK); |
3652 initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK); |
3671 initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK); |
|
3672 |
|
3673 // Eager box cache initialization only if AOT is on and any library is loaded. |
|
3674 AOTLoader::initialize_box_caches(CHECK); |
3653 } |
3675 } |
3654 |
3676 |
3655 void Threads::initialize_jsr292_core_classes(TRAPS) { |
3677 void Threads::initialize_jsr292_core_classes(TRAPS) { |
3656 TraceTime timer("Initialize java.lang.invoke classes", TRACETIME_LOG(Info, startuptime)); |
3678 TraceTime timer("Initialize java.lang.invoke classes", TRACETIME_LOG(Info, startuptime)); |
3657 |
3679 |
3767 // Initialize global data structures and create system classes in heap |
3789 // Initialize global data structures and create system classes in heap |
3768 vm_init_globals(); |
3790 vm_init_globals(); |
3769 |
3791 |
3770 #if INCLUDE_JVMCI |
3792 #if INCLUDE_JVMCI |
3771 if (JVMCICounterSize > 0) { |
3793 if (JVMCICounterSize > 0) { |
3772 JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal); |
3794 JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI); |
3773 memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize); |
3795 memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize); |
3774 } else { |
3796 } else { |
3775 JavaThread::_jvmci_old_thread_counters = NULL; |
3797 JavaThread::_jvmci_old_thread_counters = NULL; |
3776 } |
3798 } |
3777 #endif // INCLUDE_JVMCI |
3799 #endif // INCLUDE_JVMCI |
3824 JvmtiExport::transition_pending_onload_raw_monitors(); |
3846 JvmtiExport::transition_pending_onload_raw_monitors(); |
3825 |
3847 |
3826 // Create the VMThread |
3848 // Create the VMThread |
3827 { TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime)); |
3849 { TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime)); |
3828 |
3850 |
3829 VMThread::create(); |
3851 VMThread::create(); |
3830 Thread* vmthread = VMThread::vm_thread(); |
3852 Thread* vmthread = VMThread::vm_thread(); |
3831 |
3853 |
3832 if (!os::create_thread(vmthread, os::vm_thread)) { |
3854 if (!os::create_thread(vmthread, os::vm_thread)) { |
3833 vm_exit_during_initialization("Cannot create VM thread. " |
3855 vm_exit_during_initialization("Cannot create VM thread. " |
3834 "Out of system resources."); |
3856 "Out of system resources."); |
4161 JvmtiExport::enter_onload_phase(); |
4184 JvmtiExport::enter_onload_phase(); |
4162 |
4185 |
4163 for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) { |
4186 for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) { |
4164 // CDS dumping does not support native JVMTI agent. |
4187 // CDS dumping does not support native JVMTI agent. |
4165 // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified. |
4188 // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified. |
4166 if (DumpSharedSpaces || DynamicDumpSharedSpaces) { |
4189 if (Arguments::is_dumping_archive()) { |
4167 if(!agent->is_instrument_lib()) { |
4190 if(!agent->is_instrument_lib()) { |
4168 vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name()); |
4191 vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name()); |
4169 } else if (!AllowArchivingWithJavaAgent) { |
4192 } else if (!AllowArchivingWithJavaAgent) { |
4170 vm_exit_during_cds_dumping( |
4193 vm_exit_during_cds_dumping( |
4171 "Must enable AllowArchivingWithJavaAgent in order to run Java agent during CDS dumping"); |
4194 "Must enable AllowArchivingWithJavaAgent in order to run Java agent during CDS dumping"); |
4442 Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p)); |
4465 Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p)); |
4443 } |
4466 } |
4444 |
4467 |
4445 void Threads::remove(JavaThread* p, bool is_daemon) { |
4468 void Threads::remove(JavaThread* p, bool is_daemon) { |
4446 |
4469 |
4447 // Reclaim the ObjectMonitors from the omInUseList and omFreeList of the moribund thread. |
4470 // Reclaim the ObjectMonitors from the om_in_use_list and om_free_list of the moribund thread. |
4448 ObjectSynchronizer::omFlush(p); |
4471 ObjectSynchronizer::om_flush(p); |
4449 |
4472 |
4450 // Extra scope needed for Thread_lock, so we can check |
4473 // Extra scope needed for Thread_lock, so we can check |
4451 // that we do not remove thread without safepoint code notice |
4474 // that we do not remove thread without safepoint code notice |
4452 { MonitorLocker ml(Threads_lock); |
4475 { MonitorLocker ml(Threads_lock); |
4453 |
4476 |
4930 Self->park(); |
4953 Self->park(); |
4931 } |
4954 } |
4932 } |
4955 } |
4933 } |
4956 } |
4934 |
4957 |
// Acquire the low-level mux lock at *Lock, using the supplied ParkEvent
// to park on contention. If ev is NULL a temporary ParkEvent is allocated
// and released again before returning. Uses a spin-then-park strategy:
// a bounded CAS spin first, then the event is pushed onto the list
// encoded in *Lock and the thread parks until dequeued by a releaser.
void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
  // Fast path: uncontended acquire via CAS of the lock bit.
  intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
  if (w == 0) return;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
    return;
  }

  ParkEvent * ReleaseAfter = NULL;
  if (ev == NULL) {
    // Caller provided no event; allocate one and remember to release it.
    ev = ReleaseAfter = ParkEvent::Allocate(NULL);
  }
  assert((intptr_t(ev) & LOCKBIT) == 0, "invariant");
  for (;;) {
    guarantee(ev->OnList == 0, "invariant");
    int its = (os::is_MP() ? 100 : 0) + 1;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
        if (ReleaseAfter != NULL) {
          ParkEvent::Release(ReleaseAfter);
        }
        return;
      }
    }

    // Spin failed; prepare to enqueue this event on the wait list.
    ev->reset();
    ev->OnList = intptr_t(Lock);
    // The following fence() isn't _strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock;
      if ((w & LOCKBIT) == 0) {
        // Lock appears free -- try to grab it instead of enqueueing.
        if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
          ev->OnList = 0;
          // We call ::Release while holding the outer lock, thus
          // artificially lengthening the critical section.
          // Consider deferring the ::Release() until the subsequent unlock(),
          // after we've dropped the outer lock.
          if (ReleaseAfter != NULL) {
            ParkEvent::Release(ReleaseAfter);
          }
          return;
        }
        continue;      // Interference -- *Lock changed -- Just retry
      }
      assert(w & LOCKBIT, "invariant");
      // Push ev onto the front of the list encoded in the upper bits of *Lock.
      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT);
      if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
    }

    // Park until a releasing thread clears OnList and unparks us,
    // then loop back and compete for the lock again.
    while (ev->OnList != 0) {
      ev->park();
    }
  }
}
|
4993 |
|
4994 // Release() must extract a successor from the list and then wake that thread. |
4958 // Release() must extract a successor from the list and then wake that thread. |
4995 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme |
4959 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme |
4996 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based |
4960 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based |
4997 // Release() would : |
4961 // Release() would : |
4998 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list. |
4962 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list. |