hotspot/src/share/vm/opto/library_call.cpp
changeset 36316 7a83de7aabca
parent 36314 31a4d71411b9
child 36337 d4b2f60ff5a9
--- a/hotspot/src/share/vm/opto/library_call.cpp	Tue Feb 23 17:59:27 2016 +0100
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Tue Feb 23 22:09:41 2016 +0300
@@ -241,7 +241,9 @@
   // Generates the guards that check whether the result of
   // Unsafe.getObject should be recorded in an SATB log buffer.
   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
-  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
+
+  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
+  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_copyMemory();
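
The five AccessKind values mirror the VarHandles access modes, which map closely onto the C++11 memory orders. A rough standalone analogy, assuming only std::atomic (illustrative, not HotSpot code; Opaque has no exact C++ equivalent and is approximated here by a relaxed atomic):

    #include <atomic>

    std::atomic<int> x{0};

    int  get_relaxed()       { return x.load(std::memory_order_relaxed); }  // Relaxed
    int  get_opaque()        { return x.load(std::memory_order_relaxed); }  // Opaque (approx.)
    int  get_acquire()       { return x.load(std::memory_order_acquire); }  // Acquire
    void put_release(int v)  { x.store(v, std::memory_order_release); }     // Release
    int  get_volatile()      { return x.load(std::memory_order_seq_cst); }  // Volatile
    void put_volatile(int v) { x.store(v, std::memory_order_seq_cst); }     // Volatile
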
@@ -274,9 +276,10 @@
   JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
   void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
 
-  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
-  bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
-  bool inline_unsafe_ordered_store(BasicType type);
+  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
+  MemNode::MemOrd access_kind_to_memord_LS(AccessKind access_kind, bool is_store);
+  MemNode::MemOrd access_kind_to_memord(AccessKind access_kind);
+  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind, AccessKind access_kind);
   bool inline_unsafe_fence(vmIntrinsics::ID id);
   bool inline_fp_conversions(vmIntrinsics::ID id);
   bool inline_number_methods(vmIntrinsics::ID id);
@@ -553,86 +556,147 @@
   case vmIntrinsics::_inflateStringC:
   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 
-  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
-  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
-  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
-  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
-  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
-
-  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
-
-  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);
-
-  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
-  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
-  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
-  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
-  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
-  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
-  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
-  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
-  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);
-
-  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
-  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
-  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
-  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
-  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
-  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
-  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
-  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
-  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);
-
-  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, true);
-  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, true);
-  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, true);
-  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, true);
-
-  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, true);
-  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, true);
-  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, true);
-  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, true);
-
-  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
-
-  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
-  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
-  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
-
-  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
-  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
-  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
-  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
-  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
+  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Relaxed, false);
+  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Relaxed, false);
+  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Relaxed, false);
+
+  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Relaxed, false);
+  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Relaxed, false);
+  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Relaxed, false);
+
+  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,   Relaxed, false);
+  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS,  Relaxed, false);
+
+  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,   Relaxed, false);
+  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS,  Relaxed, false);
+
+  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Volatile, false);
+  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Volatile, false);
+  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Volatile, false);
+  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Volatile, false);
+  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Volatile, false);
+  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Volatile, false);
+  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Volatile, false);
+  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Volatile, false);
+  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Volatile, false);
+
+  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Volatile, false);
+  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Volatile, false);
+  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Volatile, false);
+  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Volatile, false);
+  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Volatile, false);
+  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Volatile, false);
+  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Volatile, false);
+  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Volatile, false);
+  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Volatile, false);
+
+  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Relaxed, true);
+  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Relaxed, true);
+  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Relaxed, true);
+  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Relaxed, true);
+
+  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Relaxed, true);
+  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Relaxed, true);
+  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Relaxed, true);
+  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Relaxed, true);
+
+  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Release, false);
+  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Release, false);
+  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Release, false);
+
+  case vmIntrinsics::_getObjectAcquire:         return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Acquire, false);
+  case vmIntrinsics::_getBooleanAcquire:        return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Acquire, false);
+  case vmIntrinsics::_getByteAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Acquire, false);
+  case vmIntrinsics::_getShortAcquire:          return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Acquire, false);
+  case vmIntrinsics::_getCharAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Acquire, false);
+  case vmIntrinsics::_getIntAcquire:            return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Acquire, false);
+  case vmIntrinsics::_getLongAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Acquire, false);
+  case vmIntrinsics::_getFloatAcquire:          return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Acquire, false);
+  case vmIntrinsics::_getDoubleAcquire:         return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Acquire, false);
+
+  case vmIntrinsics::_putObjectRelease:         return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Release, false);
+  case vmIntrinsics::_putBooleanRelease:        return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Release, false);
+  case vmIntrinsics::_putByteRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Release, false);
+  case vmIntrinsics::_putShortRelease:          return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Release, false);
+  case vmIntrinsics::_putCharRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Release, false);
+  case vmIntrinsics::_putIntRelease:            return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Release, false);
+  case vmIntrinsics::_putLongRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Release, false);
+  case vmIntrinsics::_putFloatRelease:          return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Release, false);
+  case vmIntrinsics::_putDoubleRelease:         return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Release, false);
+
+  case vmIntrinsics::_getObjectOpaque:          return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Opaque, false);
+  case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Opaque, false);
+  case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Opaque, false);
+  case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Opaque, false);
+  case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Opaque, false);
+  case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Opaque, false);
+  case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Opaque, false);
+  case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Opaque, false);
+  case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Opaque, false);
+
+  case vmIntrinsics::_putObjectOpaque:          return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Opaque, false);
+  case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Opaque, false);
+  case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Opaque, false);
+  case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Opaque, false);
+  case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Opaque, false);
+  case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Opaque, false);
+  case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Opaque, false);
+  case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Opaque, false);
+  case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Opaque, false);
+
+  case vmIntrinsics::_compareAndSwapObject:             return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
+  case vmIntrinsics::_compareAndSwapInt:                return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
+  case vmIntrinsics::_compareAndSwapLong:               return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
+
+  case vmIntrinsics::_weakCompareAndSwapObject:         return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapObjectRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapInt:            return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapIntAcquire:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapIntRelease:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapLong:           return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapLongAcquire:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapLongRelease:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
+
+  case vmIntrinsics::_compareAndExchangeObjectVolatile: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeObjectRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Release);
+  case vmIntrinsics::_compareAndExchangeIntVolatile:    return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeIntAcquire:     return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeIntRelease:     return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Release);
+  case vmIntrinsics::_compareAndExchangeLongVolatile:   return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeLongAcquire:    return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeLongRelease:    return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
+
+  case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
+  case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
+  case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
+  case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
+  case vmIntrinsics::_getAndSetObject:                  return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 
   case vmIntrinsics::_loadFence:
   case vmIntrinsics::_storeFence:
@@ -2284,8 +2348,10 @@
   return NULL;
 }
 
-bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
+bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
   if (callee()->is_static())  return false;  // caller must have the capability!
+  guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
+  guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
 
 #ifndef PRODUCT
   {
@@ -2374,7 +2440,42 @@
   // the barriers get omitted and the unsafe reference begins to "pollute"
   // the alias analysis of the rest of the graph, either Compile::can_alias
   // or Compile::must_alias will throw a diagnostic assert.)
-  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
+  bool need_mem_bar;
+  switch (kind) {
+    case Relaxed:
+      need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
+      break;
+    case Opaque:
+      // Opaque uses CPUOrder membars for protection against code movement.
+    case Acquire:
+    case Release:
+    case Volatile:
+      need_mem_bar = true;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  // Some access kinds require atomicity for all types, notably longs and doubles.
+  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
+  bool requires_atomic_access = false;
+  switch (kind) {
+    case Relaxed:
+    case Opaque:
+      requires_atomic_access = AlwaysAtomicAccesses;
+      break;
+    case Acquire:
+    case Release:
+    case Volatile:
+      requires_atomic_access = true;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
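
Background for the atomicity distinction: a plain 64-bit access on a 32-bit platform may be split into two 32-bit memory operations, so a concurrent reader can observe a half-written value. A minimal standalone sketch of the difference (illustrative only; inside the compiler this is expressed through the atomic variants of the load/store nodes):

    #include <atomic>
    #include <cstdint>

    // May compile to two 32-bit stores on a 32-bit target; racy readers
    // can then observe a torn value.
    void plain_put(uint64_t* p, uint64_t v) { *p = v; }

    // Single-copy atomic even for 64-bit values, while imposing no ordering --
    // roughly what requires_atomic_access asks for under Relaxed when
    // AlwaysAtomicAccesses is enabled.
    void atomic_put(std::atomic<uint64_t>* p, uint64_t v) {
      p->store(v, std::memory_order_relaxed);
    }
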
+  // Figure out the memory ordering.
+  // Acquire/Release/Volatile accesses require marking the loads/stores with MemOrd.
+  MemNode::MemOrd mo = access_kind_to_memord_LS(kind, is_store);
 
   // If we are reading the value of the referent field of a Reference
   // object (either by using Unsafe directly or through reflection)
@@ -2401,22 +2502,30 @@
   // and it is not possible to fully distinguish unintended nulls
   // from intended ones in this API.
 
-  if (is_volatile) {
-    // We need to emit leading and trailing CPU membars (see below) in
-    // addition to memory membars when is_volatile. This is a little
-    // too strong, but avoids the need to insert per-alias-type
-    // volatile membars (for stores; compare Parse::do_put_xxx), which
-    // we cannot do effectively here because we probably only have a
-    // rough approximation of type.
-    need_mem_bar = true;
-    // For Stores, place a memory ordering barrier now.
-    if (is_store) {
-      insert_mem_bar(Op_MemBarRelease);
-    } else {
-      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
+  // We need to emit leading and trailing CPU membars (see below) in
+  // addition to memory membars for special access modes. This is a little
+  // too strong, but avoids the need to insert per-alias-type
+  // volatile membars (for stores; compare Parse::do_put_xxx), which
+  // we cannot do effectively here because we probably only have a
+  // rough approximation of type.
+
+  switch(kind) {
+    case Relaxed:
+    case Opaque:
+    case Acquire:
+      break;
+    case Release:
+    case Volatile:
+      if (is_store) {
+        insert_mem_bar(Op_MemBarRelease);
+      } else {
+        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+          insert_mem_bar(Op_MemBarVolatile);
+        }
       }
-    }
+      break;
+    default:
+      ShouldNotReachHere();
   }
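
Combining this leading switch with the trailing one in the post-access hunk below, plus the CPUOrder membars guarded by need_mem_bar, the emitted pattern works out roughly to:

    // Relaxed:        access                  (CPUOrder bars only for TypeOopPtr::BOTTOM aliases)
    // Opaque:         CPUOrder; access; CPUOrder
    // Acquire load:   CPUOrder; load(acquire); Acquire; CPUOrder
    // Release store:  Release; CPUOrder; store(release); CPUOrder
    // Volatile load:  CPUOrder; load(acquire); Acquire; CPUOrder
    //                 (plus a leading Volatile bar on IRIW-sensitive CPUs)
    // Volatile store: Release; CPUOrder; store(release); Volatile; CPUOrder
    //                 (the trailing Volatile bar is omitted on IRIW-sensitive CPUs)
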
 
   // Memory barrier to prevent normal and 'unsafe' accesses from
@@ -2460,10 +2569,9 @@
       }
     }
     if (p == NULL) {
-      MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
       // To be valid, unsafe loads may depend on other conditions than
       // the one that guards them: pin the Load node
-      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
+      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
       // load value
       switch (type) {
       case T_BOOLEAN:
@@ -2477,7 +2585,9 @@
         break;
       case T_OBJECT:
         if (need_read_barrier) {
-          insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
+          // We do not require a mem bar inside pre_barrier if need_mem_bar
+          // is set: we will emit the barriers ourselves in that case.
+          insert_pre_barrier(heap_base_oop, offset, p, !need_mem_bar);
         }
         break;
       case T_ADDRESS:
@@ -2508,9 +2618,8 @@
       break;
     }
 
-    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
-    if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
+    if (type != T_OBJECT) {
+      (void) store_to_memory(control(), adr, val, type, adr_type, mo, requires_atomic_access, unaligned, mismatched);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
@@ -2531,7 +2640,7 @@
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, requires_atomic_access, mismatched);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
@@ -2540,14 +2649,23 @@
     }
   }
 
-  if (is_volatile) {
-    if (!is_store) {
-      insert_mem_bar(Op_MemBarAcquire);
-    } else {
-      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
+  switch(kind) {
+    case Relaxed:
+    case Opaque:
+    case Release:
+      break;
+    case Acquire:
+    case Volatile:
+      if (!is_store) {
+        insert_mem_bar(Op_MemBarAcquire);
+      } else {
+        if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+          insert_mem_bar(Op_MemBarVolatile);
+        }
       }
-    }
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
@@ -2558,21 +2676,52 @@
 //----------------------------inline_unsafe_load_store----------------------------
 // This method serves a couple of different customers (depending on LoadStoreKind):
 //
-// LS_cmpxchg:
-//   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
-//   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
-//   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
+// LS_cmp_swap:
+//
+//   boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
+//   boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
+//   boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
+//
+// LS_cmp_swap_weak:
+//
+//   boolean weakCompareAndSwapObject(       Object o, long offset, Object expected, Object x);
+//   boolean weakCompareAndSwapObjectAcquire(Object o, long offset, Object expected, Object x);
+//   boolean weakCompareAndSwapObjectRelease(Object o, long offset, Object expected, Object x);
+//
+//   boolean weakCompareAndSwapInt(          Object o, long offset, int    expected, int    x);
+//   boolean weakCompareAndSwapIntAcquire(   Object o, long offset, int    expected, int    x);
+//   boolean weakCompareAndSwapIntRelease(   Object o, long offset, int    expected, int    x);
+//
+//   boolean weakCompareAndSwapLong(         Object o, long offset, long   expected, long   x);
+//   boolean weakCompareAndSwapLongAcquire(  Object o, long offset, long   expected, long   x);
+//   boolean weakCompareAndSwapLongRelease(  Object o, long offset, long   expected, long   x);
 //
-// LS_xadd:
-//   public int  getAndAddInt( Object o, long offset, int  delta)
-//   public long getAndAddLong(Object o, long offset, long delta)
+// LS_cmp_exchange:
+//
+//   Object compareAndExchangeObjectVolatile(Object o, long offset, Object expected, Object x);
+//   Object compareAndExchangeObjectAcquire( Object o, long offset, Object expected, Object x);
+//   Object compareAndExchangeObjectRelease( Object o, long offset, Object expected, Object x);
+//
+//   int    compareAndExchangeIntVolatile(   Object o, long offset, int    expected, int    x);
+//   int    compareAndExchangeIntAcquire(    Object o, long offset, int    expected, int    x);
+//   int    compareAndExchangeIntRelease(    Object o, long offset, int    expected, int    x);
 //
-// LS_xchg:
+//   long   compareAndExchangeLongVolatile(  Object o, long offset, long   expected, long   x);
+//   long   compareAndExchangeLongAcquire(   Object o, long offset, long   expected, long   x);
+//   long   compareAndExchangeLongRelease(   Object o, long offset, long   expected, long   x);
+//
+// LS_get_add:
+//
+//   int  getAndAddInt( Object o, long offset, int  delta)
+//   long getAndAddLong(Object o, long offset, long delta)
+//
+// LS_get_set:
+//
 //   int    getAndSet(Object o, long offset, int    newValue)
 //   long   getAndSet(Object o, long offset, long   newValue)
 //   Object getAndSet(Object o, long offset, Object newValue)
 //
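
The three compare-and-set shapes map naturally onto C++11's compare-exchange family; a standalone analogy (illustrative, not HotSpot code):

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> a{1};

      // LS_cmp_swap analogue: strong CAS, boolean result, no spurious failure.
      int expected = 1;
      bool ok = a.compare_exchange_strong(expected, 2);

      // LS_cmp_swap_weak analogue: may fail spuriously, intended for retry loops.
      expected = 2;
      while (!a.compare_exchange_weak(expected, 3)) { /* retry */ }

      // LS_cmp_exchange analogue: the interesting result is the witness value,
      // which compare_exchange writes back into 'expected' on failure.
      int witness = 42;  // deliberately wrong expectation
      a.compare_exchange_strong(witness, 5);
      std::printf("ok=%d witness=%d\n", ok, witness);  // prints ok=1 witness=3
      return 0;
    }
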
-bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
+bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
   // This basic scheme here is the same as inline_unsafe_access, but
   // differs in enough details that combining them would make the code
   // overly confusing.  (This is a true fact! I originally combined
@@ -2589,7 +2738,9 @@
     // Check the signatures.
     ciSignature* sig = callee()->signature();
     rtype = sig->return_type()->basic_type();
-    if (kind == LS_xadd || kind == LS_xchg) {
+    switch(kind) {
+      case LS_get_add:
+      case LS_get_set: {
       // Check the signatures.
 #ifdef ASSERT
       assert(rtype == type, "get and set must return the expected type");
@@ -2598,7 +2749,10 @@
       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
 #endif // ASSERT
-    } else if (kind == LS_cmpxchg) {
+        break;
+      }
+      case LS_cmp_swap:
+      case LS_cmp_swap_weak: {
       // Check the signatures.
 #ifdef ASSERT
       assert(rtype == T_BOOLEAN, "CAS must return boolean");
@@ -2606,8 +2760,20 @@
       assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
       assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
 #endif // ASSERT
-    } else {
-      ShouldNotReachHere();
+        break;
+      }
+      case LS_cmp_exchange: {
+      // Check the signatures.
+#ifdef ASSERT
+      assert(rtype == type, "CAS must return the expected type");
+      assert(sig->count() == 4, "CAS has 4 arguments");
+      assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
+      assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
+#endif // ASSERT
+        break;
+      }
+      default:
+        ShouldNotReachHere();
     }
   }
 #endif //PRODUCT
@@ -2620,19 +2786,29 @@
   Node* offset   = NULL;
   Node* oldval   = NULL;
   Node* newval   = NULL;
-  if (kind == LS_cmpxchg) {
-    const bool two_slot_type = type2size[type] == 2;
-    receiver = argument(0);  // type: oop
-    base     = argument(1);  // type: oop
-    offset   = argument(2);  // type: long
-    oldval   = argument(4);  // type: oop, int, or long
-    newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
-  } else if (kind == LS_xadd || kind == LS_xchg){
-    receiver = argument(0);  // type: oop
-    base     = argument(1);  // type: oop
-    offset   = argument(2);  // type: long
-    oldval   = NULL;
-    newval   = argument(4);  // type: oop, int, or long
+  switch(kind) {
+    case LS_cmp_swap:
+    case LS_cmp_swap_weak:
+    case LS_cmp_exchange: {
+      const bool two_slot_type = type2size[type] == 2;
+      receiver = argument(0);  // type: oop
+      base     = argument(1);  // type: oop
+      offset   = argument(2);  // type: long
+      oldval   = argument(4);  // type: oop, int, or long
+      newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
+      break;
+    }
+    case LS_get_add:
+    case LS_get_set: {
+      receiver = argument(0);  // type: oop
+      base     = argument(1);  // type: oop
+      offset   = argument(2);  // type: long
+      oldval   = NULL;
+      newval   = argument(4);  // type: oop, int, or long
+      break;
+    }
+    default:
+      ShouldNotReachHere();
   }
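
As a worked example of the slot arithmetic (long and double arguments occupy two JVM argument slots):

    // compareAndSwapLong(Object o, long offset, long expected, long x):
    //   slot 0: receiver,  slot 1: o,  slots 2-3: offset,
    //   slots 4-5: expected,  slot 6: x   -> newval = argument(6)
    // compareAndSwapInt(Object o, long offset, int expected, int x):
    //   slot 0: receiver,  slot 1: o,  slots 2-3: offset,
    //   slot 4: expected,  slot 5: x      -> newval = argument(5)
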
 
   // Null check receiver.
@@ -2657,11 +2833,23 @@
   Compile::AliasType* alias_type = C->alias_type(adr_type);
   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
 
-  if (kind == LS_xchg && type == T_OBJECT) {
-    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
-    if (tjp != NULL) {
-      value_type = tjp;
+  switch (kind) {
+    case LS_get_set:
+    case LS_cmp_exchange: {
+      if (type == T_OBJECT) {
+        const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
+        if (tjp != NULL) {
+          value_type = tjp;
+        }
+      }
+      break;
     }
+    case LS_cmp_swap:
+    case LS_cmp_swap_weak:
+    case LS_get_add:
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   int alias_idx = C->get_alias_index(adr_type);
@@ -2671,9 +2859,22 @@
   // into actual barriers on most machines, but we still need rest of
   // compiler to respect ordering.
 
-  insert_mem_bar(Op_MemBarRelease);
+  switch (access_kind) {
+    case Relaxed:
+    case Acquire:
+      break;
+    case Release:
+    case Volatile:
+      insert_mem_bar(Op_MemBarRelease);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
   insert_mem_bar(Op_MemBarCPUOrder);
 
+  // Figure out the memory ordering.
+  MemNode::MemOrd mo = access_kind_to_memord(access_kind);
+
   // 4984716: MemBars must be inserted before this
   //          memory node in order to avoid a false
   //          dependency which will confuse the scheduler.
@@ -2684,25 +2885,45 @@
   Node* load_store = NULL;
   switch(type) {
   case T_INT:
-    if (kind == LS_xadd) {
-      load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_xchg) {
-      load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_cmpxchg) {
-      load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
-    } else {
-      ShouldNotReachHere();
+    switch(kind) {
+      case LS_get_add:
+        load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_get_set:
+        load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangeINode(control(), mem, adr, newval, oldval, adr_type, mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
     break;
   case T_LONG:
-    if (kind == LS_xadd) {
-      load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_xchg) {
-      load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_cmpxchg) {
-      load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
-    } else {
-      ShouldNotReachHere();
+    switch(kind) {
+      case LS_get_add:
+        load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_get_set:
+        load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangeLNode(control(), mem, adr, newval, oldval, adr_type, mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
     break;
   case T_OBJECT:
@@ -2713,65 +2934,109 @@
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    if (kind == LS_xchg) {
-      // If pre-barrier must execute before the oop store, old value will require do_load here.
-      if (!can_move_pre_barrier()) {
-        pre_barrier(true /* do_load*/,
-                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                    NULL /* pre_val*/,
+    switch(kind) {
+      case LS_get_set: {
+        // If pre-barrier must execute before the oop store, old value will require do_load here.
+        if (!can_move_pre_barrier()) {
+          pre_barrier(true /* do_load*/,
+                      control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                      NULL /* pre_val*/,
+                      T_OBJECT);
+        } // Else move pre_barrier to use load_store value, see below.
+        break;
+      }
+      case LS_cmp_swap_weak:
+      case LS_cmp_swap:
+      case LS_cmp_exchange: {
+        // Same as for newval above:
+        if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+          oldval = _gvn.makecon(TypePtr::NULL_PTR);
+        }
+        // The only known value which might get overwritten is oldval.
+        pre_barrier(false /* do_load */,
+                    control(), NULL, NULL, max_juint, NULL, NULL,
+                    oldval /* pre_val */,
                     T_OBJECT);
-      } // Else move pre_barrier to use load_store value, see below.
-    } else if (kind == LS_cmpxchg) {
-      // Same as for newval above:
-      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
-        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+        break;
       }
-      // The only known value which might get overwritten is oldval.
-      pre_barrier(false /* do_load */,
-                  control(), NULL, NULL, max_juint, NULL, NULL,
-                  oldval /* pre_val */,
-                  T_OBJECT);
-    } else {
-      ShouldNotReachHere();
+      default:
+        ShouldNotReachHere();
     }
 
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
-      if (kind == LS_xchg) {
-        load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
-                                                       newval_enc, adr_type, value_type->make_narrowoop()));
-      } else {
-        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
-        Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
-        load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
-                                                                newval_enc, oldval_enc));
+
+      switch(kind) {
+        case LS_get_set:
+          load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
+          break;
+        case LS_cmp_swap_weak: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new WeakCompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
+          break;
+        }
+        case LS_cmp_swap: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
+          break;
+        }
+        case LS_cmp_exchange: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new CompareAndExchangeNNode(control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
+          break;
+        }
+        default:
+          ShouldNotReachHere();
       }
     } else
 #endif
-    {
-      if (kind == LS_xchg) {
+    switch (kind) {
+      case LS_get_set:
         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
-      } else {
-        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
-        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
-      }
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangePNode(control(), mem, adr, newval, oldval, adr_type, value_type->is_oopptr(), mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
-    if (kind == LS_cmpxchg) {
-      // Emit the post barrier only when the actual store happened.
-      // This makes sense to check only for compareAndSet that can fail to set the value.
-      // CAS success path is marked more likely since we anticipate this is a performance
-      // critical path, while CAS failure path can use the penalty for going through unlikely
-      // path as backoff. Which is still better than doing a store barrier there.
-      IdealKit ideal(this);
-      ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
-        sync_kit(ideal);
-        post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
-        ideal.sync_kit(this);
-      } ideal.end_if();
-      final_sync(ideal);
-    } else {
-      post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+
+    // Emit the post barrier only when the actual store happened. This check only
+    // makes sense for the LS_cmp_* kinds, which can fail to store the value.
+    // LS_cmp_exchange does not produce any branches by default, so there is no
+    // boolean result to piggyback on. TODO: when we merge CompareAndSwap with
+    // CompareAndExchange and move the branches here, it would make sense to
+    // conditionalize post_barriers for LS_cmp_exchange as well.
+    //
+    // CAS success path is marked more likely since we anticipate this is a performance
+    // critical path, while the CAS failure path can use the penalty for going through the
+    // unlikely path as backoff, which is still better than doing a store barrier there.
+    switch (kind) {
+      case LS_get_set:
+      case LS_cmp_exchange: {
+        post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+        break;
+      }
+      case LS_cmp_swap_weak:
+      case LS_cmp_swap: {
+        IdealKit ideal(this);
+        ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
+          sync_kit(ideal);
+          post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+          ideal.sync_kit(this);
+        } ideal.end_if();
+        final_sync(ideal);
+        break;
+      }
+      default:
+        ShouldNotReachHere();
     }
     break;
   default:
@@ -2785,7 +3050,7 @@
   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
-  if (type == T_OBJECT && kind == LS_xchg) {
+  if (type == T_OBJECT && (kind == LS_get_set || kind == LS_cmp_exchange)) {
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
@@ -2804,74 +3069,52 @@
 
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
-  insert_mem_bar(Op_MemBarAcquire);
+
+  switch (access_kind) {
+    case Relaxed:
+    case Release:
+      break; // do nothing
+    case Acquire:
+    case Volatile:
+      insert_mem_bar(Op_MemBarAcquire);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
 
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
 }
 
-//----------------------------inline_unsafe_ordered_store----------------------
-// public native void Unsafe.putOrderedObject(Object o, long offset, Object x);
-// public native void Unsafe.putOrderedInt(Object o, long offset, int x);
-// public native void Unsafe.putOrderedLong(Object o, long offset, long x);
-bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
-  // This is another variant of inline_unsafe_access, differing in
-  // that it always issues store-store ("release") barrier and ensures
-  // store-atomicity (which only matters for "long").
-
-  if (callee()->is_static())  return false;  // caller must have the capability!
-
-#ifndef PRODUCT
-  {
-    ResourceMark rm;
-    // Check the signatures.
-    ciSignature* sig = callee()->signature();
-#ifdef ASSERT
-    BasicType rtype = sig->return_type()->basic_type();
-    assert(rtype == T_VOID, "must return void");
-    assert(sig->count() == 3, "has 3 arguments");
-    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
-    assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
-#endif // ASSERT
-  }
-#endif //PRODUCT
-
-  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
-
-  // Get arguments:
-  Node* receiver = argument(0);  // type: oop
-  Node* base     = argument(1);  // type: oop
-  Node* offset   = argument(2);  // type: long
-  Node* val      = argument(4);  // type: oop, int, or long
-
-  // Null check receiver.
-  receiver = null_check(receiver);
-  if (stopped()) {
-    return true;
-  }
-
-  // Build field offset expression.
-  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
-  // 32-bit machines ignore the high half of long offsets
-  offset = ConvL2X(offset);
-  Node* adr = make_unsafe_address(base, offset);
-  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
-  const Type *value_type = Type::get_const_basic_type(type);
-  Compile::AliasType* alias_type = C->alias_type(adr_type);
-
-  insert_mem_bar(Op_MemBarRelease);
-  insert_mem_bar(Op_MemBarCPUOrder);
-  // Ensure that the store is atomic for longs:
-  const bool require_atomic_access = true;
-  Node* store;
-  if (type == T_OBJECT) // reference stores need a store barrier.
-    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
-  else {
-    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
-  }
-  insert_mem_bar(Op_MemBarCPUOrder);
-  return true;
+MemNode::MemOrd LibraryCallKit::access_kind_to_memord_LS(AccessKind kind, bool is_store) {
+  MemNode::MemOrd mo = MemNode::unset;
+  switch(kind) {
+    case Opaque:
+    case Relaxed:  mo = MemNode::unordered; break;
+    case Acquire:  mo = MemNode::acquire;   break;
+    case Release:  mo = MemNode::release;   break;
+    case Volatile: mo = is_store ? MemNode::release : MemNode::acquire; break;
+    default:
+      ShouldNotReachHere();
+  }
+  guarantee(mo != MemNode::unset, "Should select memory ordering");
+  return mo;
+}
+
+MemNode::MemOrd LibraryCallKit::access_kind_to_memord(AccessKind kind) {
+  MemNode::MemOrd mo = MemNode::unset;
+  switch(kind) {
+    case Opaque:
+    case Relaxed:  mo = MemNode::unordered; break;
+    case Acquire:  mo = MemNode::acquire;   break;
+    case Release:  mo = MemNode::release;   break;
+    case Volatile: mo = MemNode::seqcst;    break;
+    default:
+      ShouldNotReachHere();
+  }
+  guarantee(mo != MemNode::unset, "Should select memory ordering");
+  return mo;
 }
 
 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {