diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -39,17 +39,6 @@
   _ReadWriteBarrier();
 }
 
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore does not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
-template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
 inline void OrderAccess::loadload()   { compiler_barrier(); }
 inline void OrderAccess::storestore() { compiler_barrier(); }
 inline void OrderAccess::loadstore()  { compiler_barrier(); }
@@ -74,45 +63,4 @@
   __cpuid(regs, 0);
 }
 
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov al, v;
-      xchg al, byte ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov ax, v;
-      xchg ax, word ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov eax, v;
-      xchg eax, dword ptr [edx];
-    }
-  }
-};
-#endif // AMD64
-
 #endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
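
Context for the first hunk: the deleted comment records that MSVC volatile accesses already carry compiler-level acquire/release ordering, so the ScopedFence hooks can stay empty except for ScopedFence<RELEASE_X_FENCE>::postfix(), which must issue a real fence after the store. The hooks are the prefix/postfix halves of an RAII guard wrapped around a memory access. Below is a minimal sketch of that pattern; full_fence and release_store_fence are stand-in names for this sketch, not the shared HotSpot definitions in runtime/orderAccess.hpp.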
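
    #include <atomic>

    enum ScopedFenceType { X_ACQUIRE, RELEASE_X, RELEASE_X_FENCE };

    // Stand-in for OrderAccess::fence() in this sketch.
    inline void full_fence() { std::atomic_thread_fence(std::memory_order_seq_cst); }

    template <ScopedFenceType T>
    struct ScopedFence {
      ScopedFence()  { prefix(); }   // runs before the guarded access
      ~ScopedFence() { postfix(); }  // runs after the guarded access
      void prefix();
      void postfix();
    };

    // As in the deleted code: every hook is a no-op except the trailing
    // fence of a "release store + fence" access.
    template<> inline void ScopedFence<X_ACQUIRE>::prefix()        { }
    template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
    template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
    template<> inline void ScopedFence<RELEASE_X>::postfix()       { }
    template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
    template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { full_fence(); }

    // A release_store_fence then falls out of the guard:
    template <typename T>
    inline void release_store_fence(volatile T* p, T v) {
      ScopedFence<RELEASE_X_FENCE> guard;  // postfix() fences on scope exit
      *p = v;                              // MSVC volatile store: ordered
                                           // w.r.t. compiler reordering
    }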
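
The second hunk drops the 32-bit-only (#ifndef AMD64) specializations of PlatformOrderedStore for RELEASE_X_FENCE. The idiom they encode: on x86, xchg with a memory operand carries an implicit lock prefix, so a single xchg performs the store and acts as a full fence at once, with no separate mfence. A hedged modern equivalent follows; release_store_fence_u32 is a hypothetical helper for illustration, not HotSpot code.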
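
    #include <atomic>
    #include <cstdint>

    // Mirrors PlatformOrderedStore<4, RELEASE_X_FENCE>: store v into *p and
    // follow it with a full fence. A sequentially consistent store is the
    // portable spelling; mainstream x86 compilers lower it either to a
    // single "xchg" (implicitly locked, hence fencing) or to "mov" plus
    // "mfence" -- the same effect as the deleted __asm block.
    inline void release_store_fence_u32(std::atomic<uint32_t>* p, uint32_t v) {
      p->store(v, std::memory_order_seq_cst);
    }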
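
The three deleted specializations differ only in operand size (al/byte, ax/word, eax/dword for 1-, 2-, and 4-byte stores). They were 32-bit only in the first place because MSVC does not support inline __asm when targeting x64, so the AMD64 build already took a different path.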