/*
 * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of class atomic
|
27 inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } |
|
28 inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } |
|
29 inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } |
|
30 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } |
|
31 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } |
|
32 inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } |
|
33 |
|
34 inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } |
|
35 inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } |
|
36 inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } |
|
37 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } |
|
38 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } |
|
39 inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } |
|
40 |
|
41 inline void Atomic::inc (volatile jint* dest) { (void)add (1, dest); } |
|
42 inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); } |
|
43 inline void Atomic::inc_ptr(volatile void* dest) { (void)add_ptr(1, dest); } |
|
44 |
|
45 inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); } |
|
46 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } |
|
47 inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } |
|
48 |
|
#ifdef _GNU_SOURCE
|
// Atomically add add_value to *dest; returns the updated value.
// Implemented as a ld/cas retry loop (requires the SPARC v9 'cas').
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  intptr_t rv;
  __asm__ volatile(
    "1: \n\t"
    " ld [%2], %%o2\n\t"         // o2 = current value of *dest
    " add %1, %%o2, %%o3\n\t"    // o3 = o2 + add_value
    " cas [%2], %%o2, %%o3\n\t"  // install o3 iff *dest still == o2; o3 gets old *dest
    " cmp %%o2, %%o3\n\t"        // did another thread intervene?
    " bne 1b\n\t"                // yes - retry with the fresh value
    " nop\n\t"                   // branch delay slot
    " add %1, %%o2, %0\n\t"      // rv = add_value + value we replaced
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
|
67 |
|
68 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { |
|
69 intptr_t rv; |
|
70 #ifdef _LP64 |
|
71 __asm__ volatile( |
|
72 "1: \n\t" |
|
73 " ldx [%2], %%o2\n\t" |
|
74 " add %0, %%o2, %%o3\n\t" |
|
75 " casx [%2], %%o2, %%o3\n\t" |
|
76 " cmp %%o2, %%o3\n\t" |
|
77 " bne %%xcc, 1b\n\t" |
|
78 " nop\n\t" |
|
79 " add %0, %%o2, %0\n\t" |
|
80 : "=r" (rv) |
|
81 : "r" (add_value), "r" (dest) |
|
82 : "memory", "o2", "o3"); |
|
83 #else //_LP64 |
|
84 __asm__ volatile( |
|
85 "1: \n\t" |
|
86 " ld [%2], %%o2\n\t" |
|
87 " add %1, %%o2, %%o3\n\t" |
|
88 " cas [%2], %%o2, %%o3\n\t" |
|
89 " cmp %%o2, %%o3\n\t" |
|
90 " bne 1b\n\t" |
|
91 " nop\n\t" |
|
92 " add %1, %%o2, %0\n\t" |
|
93 : "=r" (rv) |
|
94 : "r" (add_value), "r" (dest) |
|
95 : "memory", "o2", "o3"); |
|
96 #endif // _LP64 |
|
97 return rv; |
|
98 } |
|
99 |
|
100 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { |
|
101 return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest); |
|
102 } |
|
103 |
|
104 |
|
// Atomically exchange *dest with exchange_value; returns the old value.
// Uses the (v8-compatible) 'swap' instruction.
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
    " swap [%2],%1\n\t"   // swap register <-> memory; %1 is tied to %0 (rv)
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
  return rv;
}
|
114 |
|
// Atomically exchange *dest with exchange_value; returns the old value.
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  intptr_t rv = exchange_value;
#ifdef _LP64
  // There is no 64-bit swap instruction, so emulate with a casx retry loop.
  __asm__ volatile(
    "1:\n\t"
    " mov %1, %%o3\n\t"           // o3 = value to install (reloaded every retry)
    " ldx [%2], %%o2\n\t"         // o2 = current value of *dest
    " casx [%2], %%o2, %%o3\n\t"  // install o3 iff *dest still == o2; o3 gets old *dest
    " cmp %%o2, %%o3\n\t"         // lost a race?
    " bne %%xcc, 1b\n\t"          // yes - retry
    " nop\n\t"                    // branch delay slot
    " mov %%o2, %0\n\t"           // return the value we displaced
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
#else //_LP64
  // 32-bit: the single 'swap' instruction does it all; %1 is tied to %0.
  __asm__ volatile(
    "swap [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
#endif // _LP64
  return rv;
}
|
139 |
|
140 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { |
|
141 return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); |
|
142 } |
|
143 |
|
144 |
|
// Atomically compare-and-exchange: if *dest == compare_value, install
// exchange_value.  Returns the value found at *dest (== compare_value
// iff the exchange happened).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  jint rv;
  __asm__ volatile(
    " cas [%2], %3, %0"   // %0 enters holding exchange_value (the "0" tie below)
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
|
154 |
|
// Atomically compare-and-exchange a jlong at *dest.  Returns the value
// found at *dest (== compare_value iff the exchange happened).
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
#ifdef _LP64
  // 64-bit build: casx operates directly on the 64-bit word.
  jlong rv;
  __asm__ volatile(
    " casx [%2], %3, %0"   // %0 enters holding exchange_value (the "0" tie)
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
#else //_LP64
  // 32-bit build: glue the two 32-bit halves of each operand into a single
  // 64-bit register with sllx/srl/or, casx, then split the result back
  // into rv.words[0..1].
  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
  volatile jlong_accessor evl, cvl, rv;
  evl.long_value = exchange_value;
  cvl.long_value = compare_value;

  // NOTE(review): this asm rewrites input operands %2, %3, %5 and %6 in
  // place without declaring them as outputs/earlyclobbers - that relies on
  // the compiler not reusing those registers; confirm against the GCC
  // extended-asm constraint rules.
  __asm__ volatile(
    " sllx %2, 32, %2\n\t"    // %2 = high half of exchange value, shifted up
    " srl %3, 0, %3\n\t"      // zero-extend the low half
    " or %2, %3, %2\n\t"      // %2 = full 64-bit exchange value
    " sllx %5, 32, %5\n\t"
    " srl %6, 0, %6\n\t"
    " or %5, %6, %5\n\t"      // %5 = full 64-bit compare value
    " casx [%4], %5, %2\n\t"  // %2 = value found at *dest
    " srl %2, 0, %1\n\t"      // low word of the result
    " srlx %2, 32, %0\n\t"    // high word of the result
    : "=r" (rv.words[0]), "=r" (rv.words[1])
    : "r" (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
    : "memory");

  return rv.long_value;
#endif //_LP64
}
|
187 |
|
// Pointer-width compare-and-exchange; returns the value found at *dest.
// The two branches differ only in operand width (casx vs. cas).
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  intptr_t rv;
#ifdef _LP64
  __asm__ volatile(
    " casx [%2], %3, %0"   // %0 enters holding exchange_value (the "0" tie)
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
#else //_LP64
  __asm__ volatile(
    " cas [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
#endif // _LP64
  return rv;
}
|
205 |
|
206 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { |
|
207 return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value); |
|
208 } |
|
209 |
|
#else // _GNU_SOURCE

#if defined(COMPILER2) || defined(_LP64)
|
213 |
|
// This is the interface to the atomic instructions in solaris_sparc.il.
// It's very messy because we need to support v8 and these instructions
// are illegal there.  When sparc v8 is dropped, we can drop out lots of
// this code.  Also compiler2 does not support v8 so the conditional code
// omits the instruction set check.
|
219 |
|
// Hand-written assembly stubs; the implementations live in solaris_sparc.il.
extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest);
extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);

extern "C" jint _Atomic_cas32(jint exchange_value, volatile jint* dest, jint compare_value);
extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
extern "C" jlong _Atomic_casl (jlong exchange_value, volatile jlong* dest, jlong compare_value);

extern "C" jint _Atomic_add32(jint inc, volatile jint* dest);
extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
|
229 |
|
230 |
|
// Atomic add, delegating to the .il stubs; returns the updated value.
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  return _Atomic_add32(add_value, dest);
}

// Pointer-width add: pick the stub matching the word size of the build.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef _LP64
  return _Atomic_add64(add_value, dest);
#else //_LP64
  return _Atomic_add32(add_value, dest);
#endif // _LP64
}

// Pointer-flavoured wrapper over the intptr_t implementation above.
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
}
|
246 |
|
247 |
|
// Atomic exchange, delegating to the .il stubs; returns the previous
// contents of *dest.
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  return _Atomic_swap32(exchange_value, dest);
}

// Pointer-width exchange: pick the stub matching the word size.
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef _LP64
  return _Atomic_swap64(exchange_value, dest);
#else // _LP64
  return _Atomic_swap32(exchange_value, dest);
#endif // _LP64
}

// Pointer-flavoured wrapper over the intptr_t implementation above.
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
|
263 |
|
264 |
|
// Compare-and-exchange, delegating to the .il stubs.  Each returns the
// value found at *dest (== compare_value iff the exchange happened).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  return _Atomic_cas32(exchange_value, dest, compare_value);
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
#ifdef _LP64
  // Return 64 bit value in %o0
  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
#else // _LP64
  // The casl stub needs v9; v8-only builds must not reach here.
  assert (VM_Version::v9_instructions_work(), "only supported on v9");
  // Return 64 bit value in %o0,%o1 by hand
  return _Atomic_casl(exchange_value, dest, compare_value);
#endif // _LP64
}

// Pointer-width compare-and-exchange: pick the stub for the word size.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
#ifdef _LP64
  return _Atomic_cas64(exchange_value, dest, compare_value);
#else // _LP64
  return _Atomic_cas32(exchange_value, dest, compare_value);
#endif // _LP64
}

// Pointer-flavoured wrapper over the intptr_t implementation above.
inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value);
}
|
291 |
|
292 |
|
#else // _LP64 || COMPILER2


// 32-bit compiler1 only
|
// 32-bit compiler1 builds may run on v8, so atomics are routed through
// function pointers installed by os:: at startup.  Returns the updated value.
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  return (*os::atomic_add_func)(add_value, dest);
}

// intptr_t is 32 bits here, so the jint flavour suffices.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add((jint)add_value, (volatile jint*)dest);
}
|
309 |
|
310 |
|
// Atomic exchange via the runtime-selected stub; returns the previous
// contents of *dest.
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  return (*os::atomic_xchg_func)(exchange_value, dest);
}

// intptr_t is 32 bits here, so the jint flavour suffices.
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}
|
322 |
|
323 |
|
// Compare-and-exchange via the runtime-selected stubs; each returns the
// value found at *dest (== compare_value iff the exchange happened).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
}

// jlong needs its own stub - a 64-bit CAS cannot be built from the
// 32-bit one.
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
}

// intptr_t is 32 bits here, so the jint flavour suffices.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}
|
339 |
|
#endif // _LP64 || COMPILER2

#endif // _GNU_SOURCE