diff -r 84b0544833c4 -r 060d9d139109 src/hotspot/share/runtime/thread.cpp
--- a/src/hotspot/share/runtime/thread.cpp	Thu Sep 12 11:35:02 2019 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Sat Sep 14 13:03:44 2019 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "jvm.h"
+#include "aot/aotLoader.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/moduleEntry.hpp"
@@ -172,7 +173,7 @@
 // Support for forcing alignment of thread objects for biased locking
 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   if (UseBiasedLocking) {
-    const int alignment = markOopDesc::biased_lock_alignment;
+    const size_t alignment = markWord::biased_lock_alignment;
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
@@ -222,7 +223,6 @@
   // stack and get_thread
   set_stack_base(NULL);
   set_stack_size(0);
-  set_self_raw_id(0);
   set_lgrp_id(-1);
   DEBUG_ONLY(clear_suspendible_thread();)
 
@@ -249,8 +249,7 @@
 
   // plain initialization
   debug_only(_owned_locks = NULL;)
-  debug_only(_allow_allocation_count = 0;)
-  NOT_PRODUCT(_allow_safepoint_count = 0;)
+  NOT_PRODUCT(_no_safepoint_count = 0;)
   NOT_PRODUCT(_skip_gcalot = false;)
   _jvmti_env_iteration_count = 0;
   set_allocated_bytes(0);
@@ -260,11 +259,11 @@
   _current_pending_monitor_is_from_java = true;
   _current_waiting_monitor = NULL;
   _num_nested_signal = 0;
-  omFreeList = NULL;
-  omFreeCount = 0;
-  omFreeProvision = 32;
-  omInUseList = NULL;
-  omInUseCount = 0;
+  om_free_list = NULL;
+  om_free_count = 0;
+  om_free_provision = 32;
+  om_in_use_list = NULL;
+  om_in_use_count = 0;
 
 #ifdef ASSERT
   _visited_for_critical_count = false;
@@ -292,7 +291,6 @@
   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
   // and ::Release()
   _ParkEvent = ParkEvent::Allocate(this);
-  _SleepEvent = ParkEvent::Allocate(this);
   _MuxEvent = ParkEvent::Allocate(this);
 
 #ifdef CHECK_UNHANDLED_OOPS
@@ -302,9 +300,9 @@
 #endif // CHECK_UNHANDLED_OOPS
 #ifdef ASSERT
   if (UseBiasedLocking) {
-    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+    assert(is_aligned(this, markWord::biased_lock_alignment), "forced alignment of thread object failed");
     assert(this == _real_malloc_address ||
-           this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+           this == align_up(_real_malloc_address, markWord::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
 #endif // ASSERT
@@ -457,7 +455,6 @@
   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
   // We NULL out the fields for good hygiene.
   ParkEvent::Release(_ParkEvent); _ParkEvent = NULL;
-  ParkEvent::Release(_SleepEvent); _SleepEvent = NULL;
   ParkEvent::Release(_MuxEvent); _MuxEvent = NULL;
 
   delete handle_area();
@@ -859,23 +856,6 @@
   return true;
 }
 
-#ifndef PRODUCT
-void JavaThread::record_jump(address target, address instr, const char* file,
-                             int line) {
-
-  // This should not need to be atomic as the only way for simultaneous
-  // updates is via interrupts. Even then this should be rare or non-existent
-  // and we don't care that much anyway.
-
-  int index = _jmp_ring_index;
-  _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1);
-  _jmp_ring[index]._target = (intptr_t) target;
-  _jmp_ring[index]._instruction = (intptr_t) instr;
-  _jmp_ring[index]._file = file;
-  _jmp_ring[index]._line = line;
-}
-#endif // PRODUCT
-
 void Thread::interrupt(Thread* thread) {
   debug_only(check_for_dangling_thread_pointer(thread);)
   os::interrupt(thread);
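Note on the Thread::allocate() and constructor hunks above: biased locking requires the Thread object to sit on a markWord::biased_lock_alignment boundary, so allocate() over-allocates and rounds the raw pointer up (which is what align_up() computes and is_aligned() now asserts). A minimal standalone sketch of the same trick, assuming a power-of-two alignment; the names allocate_aligned and real_addr_out are illustrative, not HotSpot API:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Round a malloc'd block up to a power-of-two alignment. malloc() already
    // guarantees sizeof(intptr_t) alignment, which is why over-allocating by
    // (alignment - sizeof(intptr_t)) is enough, mirroring Thread::allocate().
    static void* allocate_aligned(size_t size, size_t alignment, void** real_addr_out) {
      void* raw = std::malloc(size + alignment - sizeof(intptr_t));
      if (raw == NULL) return NULL;
      *real_addr_out = raw;  // keep the raw pointer (cf. _real_malloc_address) so it can be freed
      uintptr_t p = reinterpret_cast<uintptr_t>(raw);
      return reinterpret_cast<void*>((p + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }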
@@ -989,7 +969,7 @@
 
 #ifdef ASSERT
 void Thread::print_owned_locks_on(outputStream* st) const {
-  Monitor *cur = _owned_locks;
+  Mutex* cur = _owned_locks;
   if (cur == NULL) {
     st->print(" (no locks) ");
   } else {
@@ -1001,38 +981,35 @@
   }
 }
 
-static int ref_use_count = 0;
-
-bool Thread::owns_locks_but_compiled_lock() const {
-  for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
-    if (cur != Compile_lock) return true;
-  }
-  return false;
+// Checks safepoint allowed and clears unhandled oops at potential safepoints.
+void Thread::check_possible_safepoint() {
+  if (!is_Java_thread()) return;
+
+  if (_no_safepoint_count > 0) {
+    fatal("Possible safepoint reached by thread that does not allow it");
+  }
+#ifdef CHECK_UNHANDLED_OOPS
+  // Clear unhandled oops in JavaThreads so we get a crash right away.
+  clear_unhandled_oops();
+#endif // CHECK_UNHANDLED_OOPS
 }
 
-
-#endif
-
-#ifndef PRODUCT
-
 // The flag: potential_vm_operation notifies if this particular safepoint state could potentially
 // invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
 // no locks which allow_vm_block's are held
 void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
-  // Check if current thread is allowed to block at a safepoint
-  if (!(_allow_safepoint_count == 0)) {
-    fatal("Possible safepoint reached by thread that does not allow it");
-  }
-  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+  if (!is_Java_thread()) return;
+
+  check_possible_safepoint();
+
+  if (((JavaThread*)this)->thread_state() != _thread_in_vm) {
     fatal("LEAF method calling lock?");
   }
 
-#ifdef ASSERT
-  if (potential_vm_operation && is_Java_thread()
-      && !Universe::is_bootstrapping()) {
+  if (potential_vm_operation && !Universe::is_bootstrapping()) {
     // Make sure we do not hold any locks that the VM thread also uses.
     // This could potentially lead to deadlocks
-    for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+    for (Mutex* cur = _owned_locks; cur; cur = cur->next()) {
       // Threads_lock is special, since the safepoint synchronization will not start before this is
       // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
       // since it is used to transfer control between JavaThreads and the VMThread
@@ -1052,9 +1029,8 @@
     // We could enter a safepoint here and thus have a gc
     InterfaceSupport::check_gc_alot();
   }
-#endif
 }
-#endif
+#endif // ASSERT
 
 bool Thread::is_in_stack(address adr) const {
   assert(Thread::current() == this, "is_in_stack can only be called from current thread");
@@ -1350,16 +1326,12 @@
   {}
 
 NamedThread::~NamedThread() {
-  if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
-    _name = NULL;
-  }
+  FREE_C_HEAP_ARRAY(char, _name);
 }
 
 void NamedThread::set_name(const char* format, ...) {
   guarantee(_name == NULL, "Only get to set name once.");
   _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
-  guarantee(_name != NULL, "alloc failure");
   va_list ap;
   va_start(ap, format);
   jio_vsnprintf(_name, max_name_len, format, ap);
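Note on the safepoint-checking hunks above: check_possible_safepoint() consults the per-thread _no_safepoint_count that debug-only scopes maintain. A simplified sketch of that counter idea (this is not HotSpot's real NoSafepointVerifier, only an illustration of the mechanism):

    // A scope bumps the thread's counter on entry and drops it on exit; any code
    // that could reach a safepoint while the counter is non-zero is a bug.
    struct NoSafepointScope {
      int* _count;  // e.g. the owning thread's _no_safepoint_count
      explicit NoSafepointScope(int* count) : _count(count) { ++*_count; }
      ~NoSafepointScope() { --*_count; }
    };
    // At a potential safepoint the thread then checks, as the new
    // Thread::check_possible_safepoint() does:
    //   if (_no_safepoint_count > 0) {
    //     fatal("Possible safepoint reached by thread that does not allow it");
    //   }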
@@ -1591,6 +1563,78 @@
   }
 }
 
+// Attempt to enlarge the array for per thread counters.
+jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) {
+  jlong* new_counters = NEW_C_HEAP_ARRAY(jlong, new_size, mtJVMCI);
+  if (new_counters == NULL) {
+    return NULL;
+  }
+  if (old_counters == NULL) {
+    old_counters = new_counters;
+    memset(old_counters, 0, sizeof(jlong) * new_size);
+  } else {
+    for (int i = 0; i < MIN2((int) current_size, new_size); i++) {
+      new_counters[i] = old_counters[i];
+    }
+    if (new_size > current_size) {
+      memset(new_counters + current_size, 0, sizeof(jlong) * (new_size - current_size));
+    }
+    FREE_C_HEAP_ARRAY(jlong, old_counters);
+  }
+  return new_counters;
+}
+
+// Attempt to enlarge the array for per thread counters.
+bool JavaThread::resize_counters(int current_size, int new_size) {
+  jlong* new_counters = resize_counters_array(_jvmci_counters, current_size, new_size);
+  if (new_counters == NULL) {
+    return false;
+  } else {
+    _jvmci_counters = new_counters;
+    return true;
+  }
+}
+
+class VM_JVMCIResizeCounters : public VM_Operation {
+ private:
+  int _new_size;
+  bool _failed;
+
+ public:
+  VM_JVMCIResizeCounters(int new_size) : _new_size(new_size), _failed(false) { }
+  VMOp_Type type() const { return VMOp_JVMCIResizeCounters; }
+  bool allow_nested_vm_operations() const { return true; }
+  void doit() {
+    // Resize the old thread counters array
+    jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size);
+    if (new_counters == NULL) {
+      _failed = true;
+      return;
+    } else {
+      JavaThread::_jvmci_old_thread_counters = new_counters;
+    }
+
+    // Now resize each threads array
+    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *tp = jtiwh.next(); ) {
+      if (!tp->resize_counters(JVMCICounterSize, _new_size)) {
+        _failed = true;
+        break;
+      }
+    }
+    if (!_failed) {
+      JVMCICounterSize = _new_size;
+    }
+  }
+
+  bool failed() { return _failed; }
+};
+
+bool JavaThread::resize_all_jvmci_counters(int new_size) {
+  VM_JVMCIResizeCounters op(new_size);
+  VMThread::execute(&op);
+  return !op.failed();
+}
+
 #endif // INCLUDE_JVMCI
 
 // A JavaThread is a normal Java thread
@@ -1611,9 +1655,7 @@
   set_deferred_locals(NULL);
   set_deopt_mark(NULL);
   set_deopt_compiled_method(NULL);
-  clear_must_deopt_id();
   set_monitor_chunks(NULL);
-  set_next(NULL);
   _on_thread_list = false;
   set_thread_state(_thread_new);
   _terminated = _not_terminated;
@@ -1630,11 +1672,9 @@
   _in_retryable_allocation = false;
   _jvmci._alternate_call_target = NULL;
   assert(_jvmci._implicit_exception_pc == NULL, "must be");
+  _jvmci_counters = NULL;
   if (JVMCICounterSize > 0) {
-    _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
-    memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
-  } else {
-    _jvmci_counters = NULL;
+    resize_counters(0, (int) JVMCICounterSize);
   }
 #endif // INCLUDE_JVMCI
   _reserved_stack_activation = NULL; // stack base not known yet
@@ -1649,20 +1689,12 @@
   _pending_async_exception = NULL;
   _thread_stat = NULL;
   _thread_stat = new ThreadStatistics();
-  _blocked_on_compilation = false;
   _jni_active_critical = 0;
   _pending_jni_exception_check_fn = NULL;
   _do_not_unlock_if_synchronized = false;
   _cached_monitor_info = NULL;
   _parker = Parker::Allocate(this);
-
-#ifndef PRODUCT
-  _jmp_ring_index = 0;
-  for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
-    record_jump(NULL, NULL, NULL, 0);
-  }
-#endif // PRODUCT
-
+  _SleepEvent = ParkEvent::Allocate(this);
   // Setup safepoint state info for this thread
   ThreadSafepointState::create(this);
 
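Note on the JVMCI counter hunks above: resize_counters_array() copies the old values and zero-fills the new tail, and resize_all_jvmci_counters() wraps the per-thread swaps in VM_JVMCIResizeCounters so they happen while no Java code is running. A hedged usage sketch (the call site and the doubling policy below are illustrative, not from the patch):

    int wanted = 2 * (int) JVMCICounterSize;   // hypothetical new size
    if (!JavaThread::resize_all_jvmci_counters(wanted)) {
      // One of the NEW_C_HEAP_ARRAY allocations failed; the old arrays stay in place.
    }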
@@ -1735,7 +1767,7 @@
   if (_terminated == _vm_exited) {
     // _vm_exited is set at safepoint, and Threads_lock is never released
     // we will block here forever
-    Threads_lock->lock_without_safepoint_check();
+    Threads_lock->lock();
     ShouldNotReachHere();
   }
 }
@@ -1774,6 +1806,10 @@
   Parker::Release(_parker);
   _parker = NULL;
 
+  // Return the sleep event to the free list
+  ParkEvent::Release(_SleepEvent);
+  _SleepEvent = NULL;
+
   // Free any remaining previous UnrollBlock
   vframeArray* old_array = vframe_array_last();
 
@@ -2275,14 +2311,9 @@
 }
 
 void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
-  //
-  // If JNIEnv proxies are allowed, don't self-suspend if the target
-  // thread is not the current thread. In older versions of jdbx, jdbx
-  // threads could call into the VM with another thread's JNIEnv so we
-  // can be here operating on behalf of a suspended thread (4432884).
-  bool do_self_suspend = is_external_suspend_with_lock();
-  if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
+  // Check for pending external suspend.
+  if (is_external_suspend_with_lock()) {
     frame_anchor()->make_walkable(this);
     java_suspend_self_with_safepoint_check();
   }
 
@@ -2507,19 +2538,12 @@
 void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
   assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
-  JavaThread *curJT = JavaThread::current();
-  bool do_self_suspend = thread->is_external_suspend();
-
-  assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(),
-         "Unwalkable stack in native->vm transition");
-
-  // If JNIEnv proxies are allowed, don't self-suspend if the target
-  // thread is not the current thread. In older versions of jdbx, jdbx
-  // threads could call into the VM with another thread's JNIEnv so we
-  // can be here operating on behalf of a suspended thread (4432884).
-  if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
+  assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(),
+         "Unwalkable stack in native->vm transition");
+
+  if (thread->is_external_suspend()) {
     thread->java_suspend_self_with_safepoint_check();
   } else {
-    SafepointMechanism::block_if_requested(curJT);
+    SafepointMechanism::block_if_requested(thread);
   }
 
   JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
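Note on the suspend hunks above: with the AllowJNIEnvProxy special case gone, only the target thread itself reaches these paths, so the logic reduces to "self-suspend if externally suspended, otherwise honor a pending safepoint". A condensed sketch of that flow (handle_native_trans_suspend is a hypothetical helper name, not code from the patch):

    static void handle_native_trans_suspend(JavaThread* thread) {
      if (thread->is_external_suspend()) {
        thread->java_suspend_self_with_safepoint_check();
      } else {
        SafepointMechanism::block_if_requested(thread);
      }
    }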
@@ -2899,11 +2923,6 @@
     }
   }
 
-  // callee_target is never live across a gc point so NULL it here should
-  // it still contain a methdOop.
-
-  set_callee_target(NULL);
-
   assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
   // If we have deferred set_locals there might be oops waiting to be
   // written
@@ -2991,9 +3010,6 @@
 void JavaThread::print_thread_state_on(outputStream *st) const {
   st->print_cr("   JavaThread state: %s", _get_thread_state_name(_thread_state));
 };
-void JavaThread::print_thread_state() const {
-  print_thread_state_on(tty);
-}
 #endif // PRODUCT
 
 // Called by Threads::print() for VM_PrintThreads operation
@@ -3114,47 +3130,10 @@
   return name_str;
 }
 
-
-const char* JavaThread::get_threadgroup_name() const {
-  debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
-  oop thread_obj = threadObj();
-  if (thread_obj != NULL) {
-    oop thread_group = java_lang_Thread::threadGroup(thread_obj);
-    if (thread_group != NULL) {
-      // ThreadGroup.name can be null
-      return java_lang_ThreadGroup::name(thread_group);
-    }
-  }
-  return NULL;
-}
-
-const char* JavaThread::get_parent_name() const {
-  debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
-  oop thread_obj = threadObj();
-  if (thread_obj != NULL) {
-    oop thread_group = java_lang_Thread::threadGroup(thread_obj);
-    if (thread_group != NULL) {
-      oop parent = java_lang_ThreadGroup::parent(thread_group);
-      if (parent != NULL) {
-        // ThreadGroup.name can be null
-        return java_lang_ThreadGroup::name(parent);
-      }
-    }
-  }
-  return NULL;
-}
-
-ThreadPriority JavaThread::java_priority() const {
-  oop thr_oop = threadObj();
-  if (thr_oop == NULL) return NormPriority; // Bootstrapping
-  ThreadPriority priority = java_lang_Thread::priority(thr_oop);
-  assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
-  return priority;
-}
-
 void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
   assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
+  assert(NoPriority <= prio && prio <= MaxPriority, "sanity check");
 
   // Link Java Thread object <-> C++ Thread
   // Get the C++ thread object (an oop) from the JNI handle (a jthread)
@@ -3255,7 +3234,7 @@
 
 void JavaThread::popframe_free_preserved_args() {
   assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
-  FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
+  FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args);
   _popframe_preserved_args = NULL;
   _popframe_preserved_args_size = 0;
 }
@@ -3294,23 +3273,6 @@
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 };
 
-
-static void oops_print(frame* f, const RegisterMap *map) {
-  PrintAndVerifyOopClosure print;
-  f->print_value();
-  f->oops_do(&print, NULL, (RegisterMap*)map);
-}
-
-// Print our all the locations that contain oops and whether they are
-// valid or not. This useful when trying to find the oldest frame
-// where an oop has gone bad since the frame walk is from youngest to
-// oldest.
-void JavaThread::trace_oops() {
-  tty->print_cr("[Trace oops]");
-  frames_do(oops_print);
-}
-
-
 #ifdef ASSERT
 // Print or validate the layout of stack frames
 void JavaThread::print_frame_layout(int depth, bool validate_only) {
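Note on the removed debug helpers above: print_thread_state() was only a wrapper around the *_on variant, so debugging code that wants the same output can call it directly (sketch):

    thread->print_thread_state_on(tty);   // prints "JavaThread state: ..." as before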
@@ -3379,6 +3341,62 @@
   return NULL;
 }
 
+// java.lang.Thread.sleep support
+// Returns true if sleep time elapsed as expected, and false
+// if the thread was interrupted.
+bool JavaThread::sleep(jlong millis) {
+  assert(this == Thread::current(), "thread consistency check");
+
+  ParkEvent * const slp = this->_SleepEvent;
+  // Because there can be races with thread interruption sending an unpark()
+  // to the event, we explicitly reset it here to avoid an immediate return.
+  // The actual interrupt state will be checked before we park().
+  slp->reset();
+  // Thread interruption establishes a happens-before ordering in the
+  // Java Memory Model, so we need to ensure we synchronize with the
+  // interrupt state.
+  OrderAccess::fence();
+
+  jlong prevtime = os::javaTimeNanos();
+
+  for (;;) {
+    // interruption has precedence over timing out
+    if (os::is_interrupted(this, true)) {
+      return false;
+    }
+
+    if (millis <= 0) {
+      return true;
+    }
+
+    {
+      ThreadBlockInVM tbivm(this);
+      OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
+
+      this->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition() or
+      // java_suspend_self() via check_and_wait_while_suspended()
+
+      slp->park(millis);
+
+      // were we externally suspended while we were waiting?
+      this->check_and_wait_while_suspended();
+    }
+
+    // Update elapsed time tracking
+    jlong newtime = os::javaTimeNanos();
+    if (newtime - prevtime < 0) {
+      // time moving backwards, should only happen if no monotonic clock
+      // not a guarantee() because JVM should not abort on kernel/glibc bugs
+      assert(!os::supports_monotonic_clock(),
+             "unexpected time moving backwards detected in os::sleep()");
+    } else {
+      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+    }
+    prevtime = newtime;
+  }
+}
+
 static void compiler_thread_entry(JavaThread* thread, TRAPS) {
   assert(thread->is_Compiler_thread(), "must be compiler thread");
   CompileBroker::compiler_thread_loop();
@@ -3457,7 +3475,6 @@
 // would like. We are actively migrating Threads_lock uses to other
 // mechanisms in order to reduce Threads_lock contention.
 
-JavaThread* Threads::_thread_list = NULL;
 int Threads::_number_of_threads = 0;
 int Threads::_number_of_non_daemon_threads = 0;
 int Threads::_return_code = 0;
@@ -3492,7 +3509,7 @@
 
 // All NonJavaThreads (i.e., every non-JavaThread in the system).
 void Threads::non_java_threads_do(ThreadClosure* tc) {
-  NoSafepointVerifier nsv(!SafepointSynchronize::is_at_safepoint(), false);
+  NoSafepointVerifier nsv;
   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
     tc->do_thread(njti.current());
   }
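Note on the JavaThread::sleep() hunk above: the loop re-reads os::javaTimeNanos() after every park() and subtracts the elapsed time from the remaining millis, so early wakeups (interrupt checks, suspension) neither shorten nor extend the requested sleep. A standalone sketch of that bookkeeping using std::chrono stand-ins (sleep_like and the sleep_for call are illustrative, not HotSpot API):

    #include <chrono>
    #include <thread>

    static bool sleep_like(long long millis) {
      using namespace std::chrono;
      auto prev = steady_clock::now();
      for (;;) {
        if (millis <= 0) return true;   // slept long enough
        // Stands in for slp->park(millis); a real interrupt check would return false here.
        std::this_thread::sleep_for(milliseconds(millis));
        auto now = steady_clock::now();
        millis -= duration_cast<milliseconds>(now - prev).count();
        prev = now;
      }
    }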
@@ -3653,6 +3670,9 @@
   initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
   initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
   initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
+
+  // Eager box cache initialization only if AOT is on and any library is loaded.
+  AOTLoader::initialize_box_caches(CHECK);
 }
 
 void Threads::initialize_jsr292_core_classes(TRAPS) {
@@ -3764,7 +3784,6 @@
   }
 
   // Initialize Threads state
-  _thread_list = NULL;
   _number_of_threads = 0;
   _number_of_non_daemon_threads = 0;
 
@@ -3773,7 +3792,7 @@
 
 #if INCLUDE_JVMCI
   if (JVMCICounterSize > 0) {
-    JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
+    JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI);
     memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize);
   } else {
     JavaThread::_jvmci_old_thread_counters = NULL;
@@ -3916,6 +3935,7 @@
     Chunk::start_chunk_pool_cleaner_task();
   }
 
+
   // initialize compiler(s)
 #if defined(COMPILER1) || COMPILER2_OR_JVMCI
 #if INCLUDE_JVMCI
@@ -3964,10 +3984,8 @@
   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
 
 #if INCLUDE_CDS
-  if (DumpSharedSpaces) {
-    // capture the module path info from the ModuleEntryTable
-    ClassLoader::initialize_module_path(THREAD);
-  }
+  // capture the module path info from the ModuleEntryTable
+  ClassLoader::initialize_module_path(THREAD);
 #endif
 
 #if INCLUDE_JVMCI
@@ -4169,7 +4187,7 @@
   for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
     // CDS dumping does not support native JVMTI agent.
    // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
-    if (DumpSharedSpaces) {
+    if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
      if(!agent->is_instrument_lib()) {
        vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name());
      } else if (!AllowArchivingWithJavaAgent) {
@@ -4425,9 +4443,6 @@
 
   BarrierSet::barrier_set()->on_thread_attach(p);
 
-  p->set_next(_thread_list);
-  _thread_list = p;
-
   // Once a JavaThread is added to the Threads list, smr_delete() has
   // to be used to delete it. Otherwise we can just delete it directly.
   p->set_on_thread_list();
@@ -4453,8 +4468,8 @@
 
 void Threads::remove(JavaThread* p, bool is_daemon) {
-  // Reclaim the ObjectMonitors from the omInUseList and omFreeList of the moribund thread.
-  ObjectSynchronizer::omFlush(p);
+  // Reclaim the ObjectMonitors from the om_in_use_list and om_free_list of the moribund thread.
+  ObjectSynchronizer::om_flush(p);
 
   // Extra scope needed for Thread_lock, so we can check
   // that we do not remove thread without safepoint code notice
@@ -4465,20 +4480,6 @@
     // Maintain fast thread list
     ThreadsSMRSupport::remove_thread(p);
 
-    JavaThread* current = _thread_list;
-    JavaThread* prev = NULL;
-
-    while (current != p) {
-      prev = current;
-      current = current->next();
-    }
-
-    if (prev) {
-      prev->set_next(current->next());
-    } else {
-      _thread_list = p->next();
-    }
-
     _number_of_threads--;
     if (!is_daemon) {
      _number_of_non_daemon_threads--;
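Note on the Threads::add()/remove() hunks above: the hand-maintained _thread_list singly linked list is gone, and membership is tracked solely through ThreadsSMRSupport. Code that needs to visit every JavaThread uses the SMR-protected iterator, exactly as VM_JVMCIResizeCounters::doit() in this patch does:

    for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
      // per-thread work; the handle inside jtiwh keeps 't' from being freed concurrently
    }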