/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Compiler version last used for testing: clang 5.1
// Please update this information when this file changes

// A compiler barrier, forcing the C++ compiler to invalidate all memory
// assumptions. It emits no machine instructions; the "memory" clobber only
// keeps the compiler from caching values or reordering accesses across it.
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

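// For instance (a sketch; 'p' here is a hypothetical pointer, not part of
// this file), without the barrier the compiler could keep *p in a register
// and elide the second load:
//
//   int a = *p;
//   compiler_barrier();  // compiler must assume memory has changed...
//   int b = *p;          // ...so this load is re-issued rather than reused
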
// x86 is TSO and hence only needs a fence for storeload.
// However, a compiler barrier is still needed to prevent reordering
// between volatile and non-volatile memory accesses.
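//
// For example (a sketch with hypothetical fields, not part of this header),
// a Dekker-style handshake is where the storeload fence matters: under TSO a
// store may sit in the store buffer past a later load of another location,
// so without storeload() both threads could read 0 and both could proceed.
//
//   // Thread 1                        // Thread 2
//   _flag1 = 1;                        _flag2 = 1;
//   OrderAccess::storeload();          OrderAccess::storeload();
//   if (_flag2 == 0) { /* go */ }      if (_flag1 == 0) { /* go */ }
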
// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }
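// Under TSO every load already has acquire semantics and every store has
// release semantics at the hardware level, which is why acquire() and
// release() only need to restrain the compiler.
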
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // Always use a locked addl rather than mfence, since mfence is sometimes
    // more expensive; any locked read-modify-write instruction is a full
    // barrier on x86, and a dummy add to the top of the stack is cheap.
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

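// The specializations below implement the RELEASE_X_FENCE ordered stores
// (the release_store_fence flavor) with a single xchg: an xchg with a memory
// operand carries an implicit lock prefix, so the one instruction is both
// the store and the full fence.
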
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

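// Usage sketch (hypothetical call site; assumes the generic
// OrderAccess::release_store_fence(volatile D* p, T v) entry point declared
// in runtime/orderAccess.hpp, which dispatches to these specializations by
// operand size):
//
//   volatile jint _ready;
//   OrderAccess::release_store_fence(&_ready, 1);   // emits a single xchgl
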
#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP