#ifndef OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire/release semantics (w.r.t. compiler
// reordering) and therefore do not even need a compiler barrier
// for normal acquire/release accesses. And all generalized
// bound calls like release_store go through Atomic::load
// and Atomic::store, which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
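// Illustration (a hypothetical sketch, not part of this header): because
// prefix()/postfix() are no-ops for X_ACQUIRE and RELEASE_X here, a
// generalized release store such as
//
//   volatile jint _flag;
//   OrderAccess::release_store(&_flag, 1);  // assumed caller-side entry point
//
// reduces to a plain volatile store on this platform, while the
// RELEASE_X_FENCE variant additionally pays for exactly one
// OrderAccess::fence() after the store.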

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

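// Background (an assumption about the elided portion above): warning
// C4035 ("no return value") fires for functions whose result is left in
// EAX by an __asm block instead of a C++ return statement. The pragma
// above restores the warning, following the usual MSVC pattern:
//
//   #pragma warning(disable: 4035) // before the inline-asm functions
//   // ... functions returning their value in EAX via __asm ...
//   #pragma warning(default: 4035) // restore afterwards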
#ifndef AMD64
// On 32-bit x86, xchg with a memory operand implicitly asserts the LOCK
// prefix, so each store below also acts as a full memory fence; no
// separate mfence is needed for the RELEASE_X_FENCE semantics.
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;
      mov al, v;
      xchg al, byte ptr [edx];
    }
  }
};

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;
      mov ax, v;
      xchg ax, word ptr [edx];
    }
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;
      mov eax, v;
      xchg eax, dword ptr [edx];
    }
  }
};
#endif // AMD64
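// Usage illustration (hypothetical; callers normally reach this through
// the generalized ordered-store entry points rather than by naming the
// platform struct directly):
//
//   volatile jint _state;
//   Atomic::PlatformOrderedStore<sizeof(jint), RELEASE_X_FENCE>()(1, &_state);
//
// On 32-bit x86 this executes the 4-byte xchg sequence above, storing the
// value and fencing in a single implicitly LOCKed instruction.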

#endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP