--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -33,7 +33,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false);
--- a/src/hotspot/cpu/arm/globals_arm.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/arm/globals_arm.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -40,8 +40,6 @@
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
-define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
-
#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_SHADOW_PAGES (5 DEBUG_ONLY(+1))
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -33,8 +33,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
--- a/src/hotspot/cpu/s390/globals_s390.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/s390/globals_s390.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -35,7 +35,6 @@
// z/Architecture remembers branch targets, so don't share vtables.
define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -38,7 +38,6 @@
// according to the prior table. So, we let the thread continue and let it block by itself.
define_pd_global(bool, DontYieldALot, true); // yield no more than 100 times per second
define_pd_global(bool, ShareVtableStubs, false); // improves performance markedly for mtrt and compress
-define_pd_global(bool, NeedsDeoptSuspend, true); // register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on sparc.
--- a/src/hotspot/cpu/x86/globals_x86.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/x86/globals_x86.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -32,7 +32,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
--- a/src/hotspot/cpu/zero/globals_zero.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/cpu/zero/globals_zero.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -33,7 +33,6 @@
// runtime system. See globals.hpp for details of what they do.
define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, NeedsDeoptSuspend, false);
define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, TrapBasedNullChecks, false);
--- a/src/hotspot/share/runtime/arguments.cpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp Fri Apr 26 10:18:47 2019 +0200
@@ -565,6 +565,7 @@
{ "ProfilerNumberOfStubMethods", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfilerNumberOfRuntimeStubNodes", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "UseImplicitStableValues", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
+ { "NeedsDeoptSuspend", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
// These entries will generate build errors. Their purpose is to test the macros.
--- a/src/hotspot/share/runtime/frame.cpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/frame.cpp Fri Apr 26 10:18:47 2019 +0200
@@ -271,64 +271,11 @@
}
void frame::deoptimize(JavaThread* thread) {
+ assert(thread->frame_anchor()->has_last_Java_frame() &&
+ thread->frame_anchor()->walkable(), "must be");
// Schedule deoptimization of an nmethod activation with this frame.
assert(_cb != NULL && _cb->is_compiled(), "must be");
- // This is a fix for register window patching race
- if (NeedsDeoptSuspend && Thread::current() != thread) {
- assert(SafepointSynchronize::is_at_safepoint(),
- "patching other threads for deopt may only occur at a safepoint");
-
- // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
- // we could see the frame again and ask for it to be deoptimized since
- // it might move for a long time. That is harmless and we just ignore it.
- if (id() == thread->must_deopt_id()) {
- assert(thread->is_deopt_suspend(), "lost suspension");
- return;
- }
-
- // We are at a safepoint so the target thread can only be
- // in 4 states:
- // blocked - no problem
- // blocked_trans - no problem (i.e. could have woken up from blocked
- // during a safepoint).
- // native - register window pc patching race
- // native_trans - momentary state
- //
- // We could just wait out a thread in native_trans to block.
- // Then we'd have all the issues that the safepoint code has as to
- // whether to spin or block. It isn't worth it. Just treat it like
- // native and be done with it.
- //
- // Examine the state of the thread at the start of safepoint since
- // threads that were in native at the start of the safepoint could
- // come to a halt during the safepoint, changing the current value
- // of the safepoint_state.
- JavaThreadState state = thread->safepoint_state()->orig_thread_state();
- if (state == _thread_in_native || state == _thread_in_native_trans) {
- // Since we are at a safepoint the target thread will stop itself
- // before it can return to java as long as we remain at the safepoint.
- // Therefore we can put an additional request for the thread to stop
- // no matter what no (like a suspend). This will cause the thread
- // to notice it needs to do the deopt on its own once it leaves native.
- //
- // The only reason we must do this is because on machine with register
- // windows we have a race with patching the return address and the
- // window coming live as the thread returns to the Java code (but still
- // in native mode) and then blocks. It is only this top most frame
- // that is at risk. So in truth we could add an additional check to
- // see if this frame is one that is at risk.
- RegisterMap map(thread, false);
- frame at_risk = thread->last_frame().sender(&map);
- if (id() == at_risk.id()) {
- thread->set_must_deopt_id(id());
- thread->set_deopt_suspend();
- return;
- }
- }
- } // NeedsDeoptSuspend
-
-
// If the call site is a MethodHandle call site use the MH deopt
// handler.
CompiledMethod* cm = (CompiledMethod*) _cb;
--- a/src/hotspot/share/runtime/globals.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -324,9 +324,6 @@
"Maximum number of pages to include in the page scan procedure") \
range(0, max_uintx) \
\
- product_pd(bool, NeedsDeoptSuspend, \
- "True for register window machines (sparc/ia64)") \
- \
product(intx, UseSSE, 99, \
"Highest supported SSE instructions set on x86/x64") \
range(0, 99) \
--- a/src/hotspot/share/runtime/thread.cpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Fri Apr 26 10:18:47 2019 +0200
@@ -2526,21 +2526,6 @@
SafepointMechanism::block_if_requested(curJT);
}
- if (thread->is_deopt_suspend()) {
- thread->clear_deopt_suspend();
- RegisterMap map(thread, false);
- frame f = thread->last_frame();
- while (f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
- f = f.sender(&map);
- }
- if (f.id() == thread->must_deopt_id()) {
- thread->clear_must_deopt_id();
- f.deoptimize(thread);
- } else {
- fatal("missed deoptimization!");
- }
- }
-
JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
}
--- a/src/hotspot/share/runtime/thread.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/thread.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -286,7 +286,6 @@
_external_suspend = 0x20000000U, // thread is asked to self suspend
_ext_suspended = 0x40000000U, // thread has self-suspended
- _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt
_has_async_exception = 0x00000001U, // there is a pending async exception
_critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
@@ -1405,17 +1404,13 @@
inline void set_external_suspend();
inline void clear_external_suspend();
- inline void set_deopt_suspend();
- inline void clear_deopt_suspend();
- bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; }
-
bool is_external_suspend() const {
return (_suspend_flags & _external_suspend) != 0;
}
// Whenever a thread transitions from native to vm/java it must suspend
-  // if external|deopt suspend is present.
+  // if external suspend is present.
bool is_suspend_after_native() const {
- return (_suspend_flags & (_external_suspend | _deopt_suspend JFR_ONLY(| _trace_flag))) != 0;
+ return (_suspend_flags & (_external_suspend JFR_ONLY(| _trace_flag))) != 0;
}
// external suspend request is completed
--- a/src/hotspot/share/runtime/thread.inline.hpp Fri Apr 26 08:53:29 2019 +0100
+++ b/src/hotspot/share/runtime/thread.inline.hpp Fri Apr 26 10:18:47 2019 +0200
@@ -108,13 +108,6 @@
clear_suspend_flag(_external_suspend);
}
-inline void JavaThread::set_deopt_suspend() {
- set_suspend_flag(_deopt_suspend);
-}
-inline void JavaThread::clear_deopt_suspend() {
- clear_suspend_flag(_deopt_suspend);
-}
-
inline void JavaThread::set_pending_async_exception(oop e) {
_pending_async_exception = e;
_special_runtime_exit_condition = _async_exception;