1 /* |
|
2 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP |
|
26 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP |
|
27 |
|
28 #include <intrin.h> |
|
29 #include "runtime/atomic.hpp" |
|
30 #include "runtime/orderAccess.hpp" |
|
31 #include "runtime/os.hpp" |
|
32 |
|
33 // Compiler version last used for testing: Microsoft Visual Studio 2010 |
|
34 // Please update this information when this file changes |
|
35 |
|
36 // Implementation of class OrderAccess. |
|
37 |
|
38 // A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions |
|
// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
// Compiler-only fence: _ReadWriteBarrier() is an MSVC intrinsic that forbids the
// compiler from moving memory accesses across this point. It emits no CPU
// instruction, so it does NOT order accesses at the hardware level — callers
// needing a hardware fence must use OrderAccess::fence().
// NOTE(review): _ReadWriteBarrier is deprecated in newer MSVC toolchains in
// favor of <atomic> fences — fine for the VS2010-era compiler noted above.
inline void compiler_barrier() {
  _ReadWriteBarrier();
}
|
42 |
|
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through OrderAccess::load
// and OrderAccess::store which do volatile memory accesses.
//
// Consequently the acquire postfix and release prefix hooks are no-ops here:
// the volatile access itself supplies the compiler-level ordering. Only the
// RELEASE_X_FENCE variant must follow the store with a real hardware fence.
template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
template<> inline void ScopedFence<RELEASE_X>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
|
53 |
|
// x86 hardware already preserves load-load, store-store and load-store
// ordering (TSO), so those three barriers only need to stop the *compiler*
// from reordering. The one reordering x86 does allow is store followed by a
// later load, so storeload() must issue a full hardware fence.
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }
|
58 |
|
// On x86 (TSO) acquire and release are free at the hardware level; a
// compiler barrier suffices to prevent the compiler from moving accesses
// past the acquire/release point.
inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
|
61 |
|
// Full (StoreLoad-inclusive) hardware memory fence.
inline void OrderAccess::fence() {
#ifdef AMD64
  // 64-bit MSVC has no inline asm; delegate to a project-provided stub
  // routine that issues the fence instruction.
  StubRoutines_fence();
#else
  // 32-bit: a lock-prefixed read-modify-write acts as a full memory barrier
  // on x86 (per the Intel SDM); adding 0 to the top-of-stack word is a
  // side-effect-free way to get that barrier. Skipped on uniprocessors,
  // where no cross-CPU ordering is needed.
  if (os::is_MP()) {
    __asm {
      lock add dword ptr [esp], 0;
    }
  }
#endif // AMD64
  // Also stop the compiler from reordering accesses across the fence.
  compiler_barrier();
}
|
74 |
|
75 #ifndef AMD64 |
|
// release_store_fence for 1-byte values (32-bit only; AMD64 uses the
// generalized path). XCHG with a memory operand has implicit LOCK semantics
// on x86 (per the Intel SDM), so a single xchg performs the store AND acts
// as a full fence — cheaper than a separate store + lock add.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;           // edx = destination address
      mov al, v;            // al  = value to store (1 byte)
      xchg al, byte ptr [edx]; // atomic store + full fence (implicit lock)
    }
  }
};
|
88 |
|
// release_store_fence for 2-byte values (32-bit only). See the 1-byte
// specialization: xchg's implicit LOCK makes the store a full fence.
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;           // edx = destination address
      mov ax, v;            // ax  = value to store (2 bytes)
      xchg ax, word ptr [edx]; // atomic store + full fence (implicit lock)
    }
  }
};
|
101 |
|
// release_store_fence for 4-byte values (32-bit only). See the 1-byte
// specialization: xchg's implicit LOCK makes the store a full fence.
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;           // edx = destination address
      mov eax, v;           // eax = value to store (4 bytes)
      xchg eax, dword ptr [edx]; // atomic store + full fence (implicit lock)
    }
  }
};
|
114 #endif // AMD64 |
|
115 |
|
116 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP |
|