src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
changeset 59247 56bf71d64d51
parent 54323 846bc643f4ef
comparing 59246:fcad92f425c5 with 59247:56bf71d64d51
// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
  _ReadWriteBarrier();
}

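For comparison, a minimal sketch (not part of this file or changeset) of the same compiler-only barrier as it is commonly written for GCC/Clang builds: an empty asm statement with a "memory" clobber stops the compiler from caching memory values across the call, just as _ReadWriteBarrier() does, and emits no machine instruction.

// Editor's sketch, not HotSpot code: GCC/Clang equivalent of compiler_barrier().
inline void compiler_barrier_gcc_sketch() {
  __asm__ volatile("" : : : "memory");   // no instruction emitted; compiler-only barrier
}
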
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore do not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
-template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
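
A minimal sketch of what the removed no-op prefix()/postfix() specializations mean in practice on MSVC/x86 (the function names below are illustrative, not part of the HotSpot API): per the removed comment, MSVC already gives volatile stores release semantics with respect to compiler reordering, so a release store needs no barrier instruction at all, while a release store that must also act as a full fence needs a trailing StoreLoad barrier.

#include <intrin.h>   // _mm_mfence

// Editor's sketch only: release semantics come for free from the volatile store.
template <typename T>
inline void release_store_sketch(volatile T* dest, T value) {
  *dest = value;        // MSVC keeps earlier accesses before this volatile store
}

// Editor's sketch only: a release store that is also a full fence.
template <typename T>
inline void release_store_fence_sketch(volatile T* dest, T value) {
  *dest = value;
  _mm_mfence();         // StoreLoad barrier, the role fence() plays in the RELEASE_X_FENCE postfix above
}
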
       
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

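Only storeload() needs a real fence instruction: x86 preserves load-load, store-store, and load-store ordering in hardware, but a store may still be held in the store buffer past a younger load to a different location. A minimal Dekker-style sketch of that hazard (the flag names and the _mm_mfence choice are illustrative, not taken from this file):

#include <intrin.h>

volatile int flag_a = 0;   // hypothetical flags shared by two threads
volatile int flag_b = 0;

// Thread A: without the fence, the load of flag_b may complete before the
// store to flag_a becomes visible, so both threads can observe 0.
// Thread B mirrors this code with flag_a and flag_b swapped.
int thread_a_sketch() {
  flag_a = 1;
  _mm_mfence();            // StoreLoad barrier; the ordering storeload()/fence() provides
  return flag_b;
}
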
inline void OrderAccess::cross_modify_fence() {
  int regs[4];
  __cpuid(regs, 0);
}

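cross_modify_fence() relies on CPUID being a serializing instruction: executing it forces all prior instructions to complete and discards anything already fetched or speculatively decoded, which is what a thread must do before executing code that another thread has just patched. A minimal sketch of the same idea for a GCC/Clang toolchain (an assumption; this file is MSVC-only):

#include <cpuid.h>

// Editor's sketch, not HotSpot code: serialize the instruction stream via CPUID.
inline void cross_modify_fence_gcc_sketch() {
  unsigned int eax, ebx, ecx, edx;
  __get_cpuid(0, &eax, &ebx, &ecx, &edx);   // leaf 0; the results are discarded
}
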
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov al, v;
-      xchg al, byte ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov ax, v;
-      xchg ax, word ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov eax, v;
-      xchg eax, dword ptr [edx];
-    }
-  }
-};
-#endif // AMD64
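
The removed 32-bit specializations use xchg with a memory operand, which carries an implicit lock prefix, so the release store and the trailing full fence come from a single instruction. A minimal sketch of the same effect written with MSVC intrinsics instead of inline assembly (the function name is illustrative, not the HotSpot API):

#include <intrin.h>

// Editor's sketch only: an ordered store that also acts as a full fence,
// expressed as an atomic exchange (a full barrier on x86) rather than __asm.
inline void ordered_store_fence_sketch(volatile char* p, char v) {
  _InterlockedExchange8(p, v);
}

inline void ordered_store_fence_sketch(volatile long* p, long v) {
  _InterlockedExchange(p, v);
}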
       
#endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP