50 inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } |
50 inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } |
51 |
51 |
// For Sun Studio - implementation is in solaris_x86_[32/64].il.
// For gcc - implementation is just below.
//
// Note: the lock prefix is now emitted unconditionally.  The old
// uniprocessor optimization that elided it (plumbing os::is_MP()
// through IS_MP_DECL()/IS_MP_ARG() macros) has been removed: the
// prefix is required for MP correctness and is harmless on UP.
67 extern "C" { |
55 extern "C" { |
68 jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL()); |
56 jint _Atomic_add(jint add_value, volatile jint* dest); |
69 jint _Atomic_xchg(jint exchange_value, volatile jint* dest); |
57 jint _Atomic_xchg(jint exchange_value, volatile jint* dest); |
70 jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, |
58 jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, |
71 jbyte compare_value IS_MP_DECL()); |
59 jbyte compare_value); |
72 jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, |
60 jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, |
73 jint compare_value IS_MP_DECL()); |
61 jint compare_value); |
74 jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, |
62 jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, |
75 jlong compare_value IS_MP_DECL()); |
63 jlong compare_value); |
76 } |
64 } |
77 |
65 |
78 inline jint Atomic::add (jint add_value, volatile jint* dest) { |
66 inline jint Atomic::add (jint add_value, volatile jint* dest) { |
79 return _Atomic_add(add_value, dest IS_MP_ARG()); |
67 return _Atomic_add(add_value, dest); |
80 } |
68 } |
81 |
69 |
82 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { |
70 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { |
83 return _Atomic_xchg(exchange_value, dest); |
71 return _Atomic_xchg(exchange_value, dest); |
84 } |
72 } |
85 |
73 |
86 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE |
74 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE |
87 inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { |
75 inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { |
88 return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG()); |
76 return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value); |
89 } |
77 } |
90 |
78 |
91 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { |
79 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { |
92 return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG()); |
80 return _Atomic_cmpxchg(exchange_value, dest, compare_value); |
93 } |
81 } |
94 |
82 |
95 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { |
83 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { |
96 return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG()); |
84 return _Atomic_cmpxchg_long(exchange_value, dest, compare_value); |
97 } |
85 } |
98 |
86 |
99 |
87 |
100 #ifdef AMD64 |
88 #ifdef AMD64 |
101 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } |
89 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } |
172 } |
160 } |
173 |
161 |
174 #endif // AMD64 |
162 #endif // AMD64 |
175 |
163 |
176 #ifdef _GNU_SOURCE |
164 #ifdef _GNU_SOURCE |
177 // Add a lock prefix to an instruction on an MP machine |
|
178 #define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: " |
|
179 |
165 |
180 extern "C" { |
166 extern "C" { |
181 inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) { |
167 inline jint _Atomic_add(jint add_value, volatile jint* dest) { |
182 jint addend = add_value; |
168 jint addend = add_value; |
183 __asm__ volatile ( LOCK_IF_MP(%3) "xaddl %0,(%2)" |
169 __asm__ volatile ("lock xaddl %0,(%2)" |
184 : "=r" (addend) |
170 : "=r" (addend) |
185 : "0" (addend), "r" (dest), "r" (mp) |
171 : "0" (addend), "r" (dest) |
186 : "cc", "memory"); |
172 : "cc", "memory"); |
187 return addend + add_value; |
173 return addend + add_value; |
188 } |
174 } |
189 |
175 |
190 #ifdef AMD64 |
176 #ifdef AMD64 |
191 inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) { |
177 inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest) { |
192 intptr_t addend = add_value; |
178 intptr_t addend = add_value; |
193 __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)" |
179 __asm__ __volatile__ ("lock xaddq %0,(%2)" |
194 : "=r" (addend) |
180 : "=r" (addend) |
195 : "0" (addend), "r" (dest), "r" (mp) |
181 : "0" (addend), "r" (dest) |
196 : "cc", "memory"); |
182 : "cc", "memory"); |
197 return addend + add_value; |
183 return addend + add_value; |
198 } |
184 } |
199 |
185 |
200 inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) { |
186 inline jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest) { |
213 : "0" (exchange_value), "r" (dest) |
199 : "0" (exchange_value), "r" (dest) |
214 : "memory"); |
200 : "memory"); |
215 return exchange_value; |
201 return exchange_value; |
216 } |
202 } |
217 |
203 |
218 inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) { |
204 inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) { |
219 __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)" |
205 __asm__ volatile ("lock cmpxchgl %1,(%3)" |
220 : "=a" (exchange_value) |
206 : "=a" (exchange_value) |
221 : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) |
207 : "r" (exchange_value), "a" (compare_value), "r" (dest) |
222 : "cc", "memory"); |
208 : "cc", "memory"); |
223 return exchange_value; |
209 return exchange_value; |
224 } |
210 } |
225 |
211 |
226 |
212 |
227 inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) { |
213 inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { |
228 __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)" |
214 __asm__ volatile ("lock cmpxchgb %1,(%3)" |
229 : "=a" (exchange_value) |
215 : "=a" (exchange_value) |
230 : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) |
216 : "q" (exchange_value), "a" (compare_value), "r" (dest) |
231 : "cc", "memory"); |
217 : "cc", "memory"); |
232 return exchange_value; |
218 return exchange_value; |
233 } |
219 } |
234 |
220 |
235 // This is the interface to the atomic instruction in solaris_i486.s. |
221 // This is the interface to the atomic instruction in solaris_i486.s. |
236 jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp); |
222 jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value); |
237 |
223 |
238 inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) { |
224 inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value) { |
239 #ifdef AMD64 |
225 #ifdef AMD64 |
240 __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)" |
226 __asm__ __volatile__ ("lock cmpxchgq %1,(%3)" |
241 : "=a" (exchange_value) |
227 : "=a" (exchange_value) |
242 : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) |
228 : "r" (exchange_value), "a" (compare_value), "r" (dest) |
243 : "cc", "memory"); |
229 : "cc", "memory"); |
244 return exchange_value; |
230 return exchange_value; |
245 #else |
231 #else |
246 return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP()); |
232 return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value); |
247 |
233 |
248 #if 0 |
234 #if 0 |
249 // The code below does not work presumably because of the bug in gcc |
235 // The code below does not work presumably because of the bug in gcc |
250 // The error message says: |
236 // The error message says: |
251 // can't find a register in class BREG while reloading asm |
237 // can't find a register in class BREG while reloading asm |
253 // with such inline asm code: |
239 // with such inline asm code: |
254 |
240 |
255 volatile jlong_accessor evl, cvl, rv; |
241 volatile jlong_accessor evl, cvl, rv; |
256 evl.long_value = exchange_value; |
242 evl.long_value = exchange_value; |
257 cvl.long_value = compare_value; |
243 cvl.long_value = compare_value; |
258 int mp = os::is_MP(); |
244 |
259 |
245 __asm__ volatile ( |
260 __asm__ volatile ("cmp $0, %%esi\n\t" |
246 "lock cmpxchg8b (%%edi)\n\t" |
261 "je 1f \n\t" |
|
262 "lock\n\t" |
|
263 "1: cmpxchg8b (%%edi)\n\t" |
|
264 : "=a"(cvl.words[0]), "=d"(cvl.words[1]) |
247 : "=a"(cvl.words[0]), "=d"(cvl.words[1]) |
265 : "a"(cvl.words[0]), "d"(cvl.words[1]), |
248 : "a"(cvl.words[0]), "d"(cvl.words[1]), |
266 "b"(evl.words[0]), "c"(evl.words[1]), |
249 "b"(evl.words[0]), "c"(evl.words[1]), |
267 "D"(dest), "S"(mp) |
250 "D"(dest) |
268 : "cc", "memory"); |
251 : "cc", "memory"); |
269 return cvl.long_value; |
252 return cvl.long_value; |
270 #endif // if 0 |
253 #endif // if 0 |
271 #endif // AMD64 |
254 #endif // AMD64 |
272 } |
255 } |
273 } |
256 } |
274 #undef LOCK_IF_MP |
|
275 |
257 |
276 #endif // _GNU_SOURCE |
258 #endif // _GNU_SOURCE |
277 |
259 |
278 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP |
260 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP |