8220351: Cross-modifying code
author rehn
Thu, 28 Mar 2019 11:08:23 +0100
changeset 54323 846bc643f4ef
parent 54322 cf75ea6af695
child 54324 9d5c84b0a598
8220351: Cross-modifying code Reviewed-by: rrich, mdoerr, dholmes, eosterlund
src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp
src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp
src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp
src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp
src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp
src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp
src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp
src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp
src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
src/hotspot/share/runtime/handshake.cpp
src/hotspot/share/runtime/interfaceSupport.inline.hpp
src/hotspot/share/runtime/orderAccess.hpp
src/hotspot/share/runtime/safepoint.cpp
src/hotspot/share/runtime/safepointMechanism.cpp
src/hotspot/share/runtime/safepointMechanism.hpp
src/hotspot/share/runtime/safepointMechanism.inline.hpp
src/hotspot/share/runtime/thread.cpp
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -77,6 +77,8 @@
 inline void OrderAccess::acquire()    { inlasm_lwsync(); }
 inline void OrderAccess::release()    { inlasm_lwsync(); }
 inline void OrderAccess::fence()      { inlasm_sync();   }
+inline void OrderAccess::cross_modify_fence()
+                                      { inlasm_isync();  }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -59,6 +59,11 @@
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
--- a/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -73,5 +73,6 @@
 inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
+inline void OrderAccess::cross_modify_fence()            { }
 
 #endif // OS_CPU_BSD_ZERO_ORDERACCESS_BSD_ZERO_HPP
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -49,6 +49,8 @@
   FULL_MEM_BARRIER;
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
 {
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -101,5 +101,6 @@
 inline void OrderAccess::storeload()  { dmb_sy(); }
 inline void OrderAccess::release()    { dmb_sy(); }
 inline void OrderAccess::fence()      { dmb_sy(); }
+inline void OrderAccess::cross_modify_fence()   { }
 
 #endif // OS_CPU_LINUX_ARM_ORDERACCESS_LINUX_ARM_HPP
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -79,7 +79,8 @@
 inline void   OrderAccess::acquire()    { inlasm_lwsync(); }
 inline void   OrderAccess::release()    { inlasm_lwsync(); }
 inline void   OrderAccess::fence()      { inlasm_sync();   }
-
+inline void   OrderAccess::cross_modify_fence()
+                                        { inlasm_isync();  }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -74,6 +74,7 @@
 inline void OrderAccess::acquire()    { inlasm_zarch_acquire(); }
 inline void OrderAccess::release()    { inlasm_zarch_release(); }
 inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
+inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
--- a/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -48,4 +48,6 @@
   __asm__ volatile ("membar  #StoreLoad" : : : "memory");
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 #endif // OS_CPU_LINUX_SPARC_ORDERACCESS_LINUX_SPARC_HPP
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -55,6 +55,11 @@
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
--- a/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -82,5 +82,6 @@
 inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
 
 inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
+inline void OrderAccess::cross_modify_fence()            { }
 
 #endif // OS_CPU_LINUX_ZERO_ORDERACCESS_LINUX_ZERO_HPP
--- a/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -51,4 +51,6 @@
   __asm__ volatile ("membar  #StoreLoad" : : : "memory");
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 #endif // OS_CPU_SOLARIS_SPARC_ORDERACCESS_SOLARIS_SPARC_HPP
--- a/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -54,4 +54,9 @@
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 #endif // OS_CPU_SOLARIS_X86_ORDERACCESS_SOLARIS_X86_HPP
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -69,6 +69,11 @@
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int regs[4];
+  __cpuid(regs, 0);
+}
+
 #ifndef AMD64
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
--- a/src/hotspot/share/runtime/handshake.cpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/handshake.cpp	Thu Mar 28 11:08:23 2019 +0100
@@ -281,7 +281,7 @@
 
 void HandshakeState::clear_handshake(JavaThread* target) {
   _operation = NULL;
-  SafepointMechanism::disarm_local_poll_release(target);
+  SafepointMechanism::disarm_if_needed(target, true /* release */);
 }
 
 void HandshakeState::process_self_inner(JavaThread* thread) {
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -282,6 +282,7 @@
   }
   ~ThreadBlockInVM() {
     trans_and_fence(_thread_blocked, _thread_in_vm);
+    OrderAccess::cross_modify_fence();
     // We don't need to clear_walkable because it will happen automagically when we return to java
   }
 };
@@ -336,6 +337,8 @@
 
     _thread->set_thread_state(_thread_in_vm);
     CHECK_UNHANDLED_OOPS_ONLY(_thread->clear_unhandled_oops();)
+
+    OrderAccess::cross_modify_fence();
   }
 };
 
--- a/src/hotspot/share/runtime/orderAccess.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/orderAccess.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -266,6 +266,8 @@
   static void     release();
   static void     fence();
 
+  static void     cross_modify_fence();
+
   template <typename T>
   static T        load_acquire(const volatile T* p);
 
--- a/src/hotspot/share/runtime/safepoint.cpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/safepoint.cpp	Thu Mar 28 11:08:23 2019 +0100
@@ -477,7 +477,8 @@
       assert(!cur_state->is_running(), "Thread not suspended at safepoint");
       cur_state->restart(); // TSS _running
       assert(cur_state->is_running(), "safepoint state has not been reset");
-      SafepointMechanism::disarm_local_poll(current);
+
+      SafepointMechanism::disarm_if_needed(current, false /* NO release */);
     }
   } // ~JavaThreadIteratorWithHandle
 
@@ -716,8 +717,6 @@
 }
 
 bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
-  // The polls must be armed otherwise the safe state can change to unsafe at any time.
-  assert(SafepointMechanism::should_block(thread), "Must be armed");
   // This function must be called with the Threads_lock held so an externally
   // suspended thread cannot be resumed thus it is safe.
   assert(Threads_lock->owned_by_self() && Thread::current()->is_VM_thread(),
@@ -851,6 +850,9 @@
     thread->handle_special_runtime_exit_condition(
       !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
   }
+
+  // cross_modify_fence is done by SafepointMechanism::block_if_requested_slow
+  // which is the only caller here.
 }
 
 // ------------------------------------------------------------------------------------------------------
--- a/src/hotspot/share/runtime/safepointMechanism.cpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp	Thu Mar 28 11:08:23 2019 +0100
@@ -83,8 +83,7 @@
   }
 }
 
-void SafepointMechanism::block_if_requested_slow(JavaThread *thread) {
-  // local poll already checked, if used.
+void SafepointMechanism::block_or_handshake(JavaThread *thread) {
   if (global_poll()) {
     // Any load in ::block must not pass the global poll load.
     // Otherwise we might load an old safepoint counter (for example).
@@ -92,10 +91,31 @@
     SafepointSynchronize::block(thread);
   }
   if (uses_thread_local_poll() && thread->has_handshake()) {
-      thread->handshake_process_by_self();
+    thread->handshake_process_by_self();
   }
 }
 
+void SafepointMechanism::block_if_requested_slow(JavaThread *thread) {
+  // Read global poll and has_handshake after local poll
+  OrderAccess::loadload();
+
+  // local poll already checked, if used.
+  block_or_handshake(thread);
+
+  OrderAccess::loadload();
+
+  if (uses_thread_local_poll() && local_poll_armed(thread)) {
+    disarm_local_poll_release(thread);
+    // We might have disarmed next safepoint/handshake
+    OrderAccess::storeload();
+    if (global_poll() || thread->has_handshake()) {
+      arm_local_poll(thread);
+    }
+  }
+
+  OrderAccess::cross_modify_fence();
+}
+
 void SafepointMechanism::initialize_header(JavaThread* thread) {
   disarm_local_poll(thread);
 }
--- a/src/hotspot/share/runtime/safepointMechanism.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/safepointMechanism.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -46,9 +46,13 @@
 
   static inline bool local_poll_armed(JavaThread* thread);
 
+  static inline void disarm_local_poll(JavaThread* thread);
+  static inline void disarm_local_poll_release(JavaThread* thread);
+
   static inline bool local_poll(Thread* thread);
   static inline bool global_poll();
 
+  static void block_or_handshake(JavaThread *thread);
   static void block_if_requested_slow(JavaThread *thread);
 
   static void default_initialize();
@@ -80,10 +84,10 @@
 
   // Caller is responsible for using a memory barrier if needed.
   static inline void arm_local_poll(JavaThread* thread);
-  static inline void disarm_local_poll(JavaThread* thread);
-
+  // Release semantics
   static inline void arm_local_poll_release(JavaThread* thread);
-  static inline void disarm_local_poll_release(JavaThread* thread);
+  // Optional release
+  static inline void disarm_if_needed(JavaThread* thread, bool memory_order_release);
 
   // Setup the selected safepoint mechanism
   static void initialize();
--- a/src/hotspot/share/runtime/safepointMechanism.inline.hpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp	Thu Mar 28 11:08:23 2019 +0100
@@ -56,7 +56,7 @@
 }
 
 void SafepointMechanism::block_if_requested(JavaThread *thread) {
-  if (uses_thread_local_poll() && !SafepointMechanism::local_poll_armed(thread)) {
+  if (uses_thread_local_poll() && !local_poll_armed(thread)) {
     return;
   }
   block_if_requested_slow(thread);
@@ -70,6 +70,19 @@
   thread->set_polling_page(poll_disarmed_value());
 }
 
+void SafepointMechanism::disarm_if_needed(JavaThread* thread, bool memory_order_release) {
+  JavaThreadState jts = thread->thread_state();
+  if (jts == _thread_in_native || jts == _thread_in_native_trans) {
+    // JavaThread will disarm itself and execute cross_modify_fence() before continuing
+    return;
+  }
+  if (memory_order_release) {
+    thread->set_polling_page_release(poll_disarmed_value());
+  } else {
+    thread->set_polling_page(poll_disarmed_value());
+  }
+}
+
 void SafepointMechanism::arm_local_poll_release(JavaThread* thread) {
   thread->set_polling_page_release(poll_armed_value());
 }
--- a/src/hotspot/share/runtime/thread.cpp	Mon Mar 25 09:35:40 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp	Thu Mar 28 11:08:23 2019 +0100
@@ -1836,6 +1836,10 @@
   // Thread is now sufficiently initialized to be handled by the safepoint code as being
   // in the VM. Change thread state from _thread_new to _thread_in_vm
   ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
+  // Before a thread is on the threads list it is always safe, so after leaving the
+  // _thread_new we should emit an instruction barrier. The distance to modified code
+  // from here is probably far enough, but this is consistent and safe.
+  OrderAccess::cross_modify_fence();
 
   assert(JavaThread::current() == this, "sanity check");
   assert(!Thread::current()->owns_locks(), "sanity check");
@@ -2439,7 +2443,6 @@
       this->SR_lock()->wait(Mutex::_no_safepoint_check_flag);
     }
   }
-
   return ret;
 }
 
@@ -2467,6 +2470,9 @@
   set_thread_state(_thread_blocked);
   java_suspend_self();
   set_thread_state(state);
+  // Since we are not using a regular thread-state transition helper here,
+  // we must manually emit the instruction barrier after leaving a safe state.
+  OrderAccess::cross_modify_fence();
   InterfaceSupport::serialize_thread_state_with_handler(this);
   if (state != _thread_in_native) {
     SafepointMechanism::block_if_requested(this);