/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP

#include "orderAccess_bsd_zero.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_zero.hpp"

// Implementation of class Atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor-specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for a complete implementation
 * across the architecture.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
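  // Inline CAS: %0 is the compare operand, seeded with oldval through
  // the "0" matching constraint, and also receives the result; %2
  // holds newval and %1 is the memory operand.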
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval. Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */
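
// The fixed address 0xffff0fc0 below is the cmpxchg entry of the
// kernel-provided user helper ("kuser helper") page, a convention
// taken from ARM Linux.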

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      // Success.
      return prev;

    // We failed even though prev == oldval. Try again.
  }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.

    int prev = *ptr;

    if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
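  // Emit a full memory barrier ahead of the plain store. ARM and M68K
  // are excluded here, as they are from the __sync builtins elsewhere
  // in this file (see the hand-rolled helpers above).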
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
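  // __sync_add_and_fetch returns the updated value, matching the
  // contract of Atomic::add.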
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation. Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1. There is a test for this in JNI_CreateJavaVM().
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {
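  // Unlike the jint variant above, there is no ARM or M68K fallback
  // here: this relies on the compiler providing an 8-byte
  // __sync_val_compare_and_swap.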
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}

inline jlong Atomic::load(volatile jlong* src) {
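  // On 32-bit Zero targets a plain 64-bit access may be split into two
  // 32-bit accesses, so 64-bit loads and stores go through
  // os::atomic_copy64(), which uses an atomic copy where the platform
  // provides one.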
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP