/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"

// Implementation of class atomic
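//
// ARM and M68K provide their own compare-and-swap helpers below; all other
// targets map these operations onto the GCC __sync_* builtins.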

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr equals oldval (user-space only).
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010, since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for a complete implementation
 * across the architecture.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr equals oldval (user-space only).
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
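// Note that __kernel_cmpxchg is not an ordinary function symbol: the cast
// above reaches a helper the kernel exposes at a fixed address.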
/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.

    int prev = *ptr;

    if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

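// On targets other than ARM and M68K, issue a full barrier before each plain
// store so that earlier memory accesses are not reordered past the store.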
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value,
                            cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value,
                             cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value,
                                    cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value,
                                 cmpxchg_memory_order order) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value,
                              order);
}

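// jlong loads and stores go through os::atomic_copy64() so that 64-bit
// accesses stay atomic even on 32-bit Zero targets that cannot read or write
// a jlong in a single instruction.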
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP