--- a/src/hotspot/share/prims/unsafe.cpp Thu Nov 30 13:40:07 2017 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp Thu Nov 30 20:37:20 2017 +0100
@@ -146,18 +146,25 @@
* Normalizes values and wraps accesses in
* JavaThread::doing_unsafe_access() if needed.
*/
+template <typename T>
class MemoryAccess : StackObj {
JavaThread* _thread;
oop _obj;
ptrdiff_t _offset;
- // Resolves and returns the address of the memory access
- void* addr() {
- return index_oop_from_field_offset_long(_obj, _offset);
+ // Resolves and returns the address of the memory access.
+ // This raw memory access may fault, so we make sure it happens within the
+ // guarded scope by making the access volatile at the very least. Since the
+ // store of Thread::set_doing_unsafe_access() is also volatile, these accesses
+ // cannot be reordered by the compiler. Therefore, if the access triggers
+ // a fault, we know that Thread::doing_unsafe_access() returns true.
+ volatile T* addr() {
+ void* addr = index_oop_from_field_offset_long(_obj, _offset);
+ return static_cast<volatile T*>(addr);
}
- template <typename T>
- T normalize_for_write(T x) {
+ template <typename U>
+ U normalize_for_write(U x) {
return x;
}
@@ -165,8 +172,8 @@
return x & 1;
}
- template <typename T>
- T normalize_for_read(T x) {
+ template <typename U>
+ U normalize_for_read(U x) {
return x;
}
@@ -199,11 +206,10 @@
assert_field_offset_sane(_obj, offset);
}
- template <typename T>
T get() {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- T ret = RawAccess<>::load((T*)addr());
+ T ret = RawAccess<>::load(addr());
return normalize_for_read(ret);
} else {
T ret = HeapAccess<>::load_at(_obj, _offset);
@@ -211,22 +217,20 @@
}
}
- template <typename T>
void put(T x) {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- RawAccess<>::store((T*)addr(), normalize_for_write(x));
+ RawAccess<>::store(addr(), normalize_for_write(x));
} else {
HeapAccess<>::store_at(_obj, _offset, normalize_for_write(x));
}
}
- template <typename T>
T get_volatile() {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- volatile T ret = RawAccess<MO_SEQ_CST>::load((volatile T*)addr());
+ volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
return normalize_for_read(ret);
} else {
T ret = HeapAccess<MO_SEQ_CST>::load_at(_obj, _offset);
@@ -234,11 +238,10 @@
}
}
- template <typename T>
void put_volatile(T x) {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- RawAccess<MO_SEQ_CST>::store((volatile T*)addr(), normalize_for_write(x));
+ RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
} else {
HeapAccess<MO_SEQ_CST>::store_at(_obj, _offset, normalize_for_write(x));
}
@@ -296,11 +299,11 @@
#define DEFINE_GETSETOOP(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
- return MemoryAccess(thread, obj, offset).get<java_type>(); \
+ return MemoryAccess<java_type>(thread, obj, offset).get(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
- MemoryAccess(thread, obj, offset).put<java_type>(x); \
+ MemoryAccess<java_type>(thread, obj, offset).put(x); \
} UNSAFE_END \
\
// END DEFINE_GETSETOOP.
@@ -319,11 +322,11 @@
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
- return MemoryAccess(thread, obj, offset).get_volatile<java_type>(); \
+ return MemoryAccess<java_type>(thread, obj, offset).get_volatile(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
- MemoryAccess(thread, obj, offset).put_volatile<java_type>(x); \
+ MemoryAccess<java_type>(thread, obj, offset).put_volatile(x); \
} UNSAFE_END \
\
// END DEFINE_GETSETOOP_VOLATILE.
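For reference, assuming an instantiation such as DEFINE_GETSETOOP(jint, Int) (the instantiation list itself lies outside these hunks), the updated macro now expands, mechanically, to accessors that fix the access type at the class level:

    UNSAFE_ENTRY(jint, Unsafe_GetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
      return MemoryAccess<jint>(thread, obj, offset).get();
    } UNSAFE_END

    UNSAFE_ENTRY(void, Unsafe_PutInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
      MemoryAccess<jint>(thread, obj, offset).put(x);
    } UNSAFE_END

Before this change the same expansion would have read MemoryAccess(thread, obj, offset).get<jint>() and MemoryAccess(thread, obj, offset).put<jint>(x).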