  /**
   * Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
   */
  class GuardUnsafeAccess {
    JavaThread* _thread;

  public:
    GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
      // native/off-heap access which may raise SIGBUS if accessing
      // memory mapped file data in a region of the file which has
      // been truncated and is now invalid
      _thread->set_doing_unsafe_access(true);
    }

    ~GuardUnsafeAccess() {
      _thread->set_doing_unsafe_access(false);
    }
  };
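
  // Commentary (not in the original source): a minimal sketch of the pattern
  // this guard enables. A raw load is bracketed so that a SIGBUS taken while
  // touching a truncated memory-mapped file is attributed to this Unsafe call
  // (the signal handler checks doing_unsafe_access()) rather than crashing the VM.
  //
  //   jint read_off_heap(JavaThread* thread, void* addr) {
  //     GuardUnsafeAccess guard(thread);        // flag set in constructor
  //     return RawAccess<>::load((jint*)addr);  // may fault; handler sees flag
  //   }                                         // flag cleared in destructor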

public:
  MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
    : _thread(thread), _obj(JNIHandles::resolve(obj)), _offset((ptrdiff_t)offset) {
    assert_field_offset_sane(_obj, offset);
  }

  template <typename T>
  T get() {
    if (oopDesc::is_null(_obj)) {
      GuardUnsafeAccess guard(_thread);
      T ret = RawAccess<>::load((T*)addr());
      return normalize_for_read(ret);
    } else {
      T ret = HeapAccess<>::load_at(_obj, _offset);
      return normalize_for_read(ret);
    }
  }

  template <typename T>
  void put(T x) {
    if (oopDesc::is_null(_obj)) {
      GuardUnsafeAccess guard(_thread);
      RawAccess<>::store((T*)addr(), normalize_for_write(x));
    } else {
      HeapAccess<>::store_at(_obj, _offset, normalize_for_write(x));
    }
  }


  template <typename T>
  T get_volatile() {
    if (oopDesc::is_null(_obj)) {
      GuardUnsafeAccess guard(_thread);
      volatile T ret = RawAccess<MO_SEQ_CST>::load((volatile T*)addr());
      return normalize_for_read(ret);
    } else {
      T ret = HeapAccess<MO_SEQ_CST>::load_at(_obj, _offset);
      return normalize_for_read(ret);
    }
  }
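  // Commentary (not in the original source): the MO_SEQ_CST decorator on the
  // load subsumes the memory ordering an earlier version of this function
  // spelled out by hand — an OrderAccess::fence() on
  // support_IRIW_for_not_multiple_copy_atomic_cpu platforms followed by
  // OrderAccess::load_acquire().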

  template <typename T>
  void put_volatile(T x) {
    if (oopDesc::is_null(_obj)) {
      GuardUnsafeAccess guard(_thread);
      RawAccess<MO_SEQ_CST>::store((volatile T*)addr(), normalize_for_write(x));
    } else {
      HeapAccess<MO_SEQ_CST>::store_at(_obj, _offset, normalize_for_write(x));
    }
  }
};
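
// Commentary (not in the original source): a sketch of how the scalar Unsafe
// entry points are expected to funnel through MemoryAccess, so each primitive
// type needs only a one-line body, e.g.:
//
//   UNSAFE_ENTRY(jint, Unsafe_GetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
//     return MemoryAccess(thread, obj, offset).get<jint>();
//   } UNSAFE_END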

// These functions allow a null base pointer with an arbitrary address.
// But if the base pointer is non-null, the offset should make some sense.
// That is, it should be in the range [0, MAX_OBJECT_SIZE].
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  oop v = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(p, offset);
  return JNIHandles::make_local(env, v);
} UNSAFE_END
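
// Commentary (not in the original source): ON_UNKNOWN_OOP_REF tells the GC
// barrier layer that the reference strength of the accessed field is unknown
// at this call site, so it is resolved at runtime. This replaces the manual
// G1 SATB handling (is_java_lang_ref_Reference_access /
// ensure_satb_referent_alive) that an earlier version of this file performed
// for possible java.lang.ref.Reference.referent accesses.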

UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(p, offset, x);
} UNSAFE_END

UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  oop v = HeapAccess<MO_SEQ_CST | ON_UNKNOWN_OOP_REF>::oop_load_at(p, offset);
  return JNIHandles::make_local(env, v);
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  HeapAccess<MO_SEQ_CST | ON_UNKNOWN_OOP_REF>::oop_store_at(p, offset, x);
} UNSAFE_END

UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
  oop v = *(oop*) (address) addr;
  return JNIHandles::make_local(env, v);
} UNSAFE_END

UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
#ifdef VM_LITTLE_ENDIAN
  return false;
#else
  return true;
#endif
} UNSAFE_END

UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop e = JNIHandles::resolve(e_h);
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
  return JNIHandles::make_local(env, res);
} UNSAFE_END
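
// Commentary (not in the original source): oop_atomic_cmpxchg_at returns the
// value witnessed at the location, so callers detect success by comparing the
// result against the expected value, e.g.:
//
//   oop witnessed = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, off, e);
//   bool swapped = (witnessed == e);  // the CompareAndSet* entries below do exactly this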

UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
  oop p = JNIHandles::resolve(obj);
  if (oopDesc::is_null(p)) {
    volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
    return RawAccess<>::atomic_cmpxchg(x, addr, e);
  } else {
    assert_field_offset_sane(p, offset);
    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
  }
} UNSAFE_END

UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
  oop p = JNIHandles::resolve(obj);
  if (oopDesc::is_null(p)) {
    volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
    return RawAccess<>::atomic_cmpxchg(x, addr, e);
  } else {
    assert_field_offset_sane(p, offset);
    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
  }
} UNSAFE_END
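
// Commentary (not in the original source): an earlier version of this file
// special-cased platforms without a native 8-byte compare-and-swap
// (#ifndef SUPPORTS_NATIVE_CX8) with a MutexLockerEx-protected fallback under
// UnsafeJlong_lock; that fallback no longer appears here and is presumed to
// be handled below the RawAccess/HeapAccess layer.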

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop e = JNIHandles::resolve(e_h);
  oop p = JNIHandles::resolve(obj);
  assert_field_offset_sane(p, offset);
  oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
  return ret == e;
} UNSAFE_END

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
  oop p = JNIHandles::resolve(obj);
  if (oopDesc::is_null(p)) {
    volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
  } else {
    assert_field_offset_sane(p, offset);
    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
  }
} UNSAFE_END

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
  oop p = JNIHandles::resolve(obj);
  if (oopDesc::is_null(p)) {
    volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
  } else {
    assert_field_offset_sane(p, offset);
    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
  }
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
  EventThreadPark event;
  HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);