src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
changeset 59247 56bf71d64d51
parent 59122 5d73255c2d52
child 59248 e92153ed8bdc
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -32,10 +32,6 @@
 // Note that memory_order_conservative requires a full barrier after atomic stores.
 // See https://patchwork.kernel.org/patch/3575821/
 
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
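
The three macros removed above are thin wrappers over GCC/Clang builtins: FULL_MEM_BARRIER is __sync_synchronize() (a full dmb ish on AArch64), while READ_MEM_BARRIER and WRITE_MEM_BARRIER are acquire and release thread fences (typically dmb ishld and dmb ish respectively). Assuming they are relocated to the matching orderAccess header rather than dropped outright, their semantics are unchanged. A minimal standalone sketch, not JDK code, of the publish/consume pattern these fences support (volatile stands in for HotSpot-style raw accesses):

    // Hypothetical demo of the fence builtins the removed macros wrapped.
    volatile int flag;   // guard variable, polled by the reader
    int payload;         // data published by the writer

    void writer() {
      payload = 42;
      __atomic_thread_fence(__ATOMIC_RELEASE);  // WRITE_MEM_BARRIER: keeps the
      flag = 1;                                 // payload store before the flag store
    }

    int reader() {
      while (flag == 0) {}                      // spin until published
      __atomic_thread_fence(__ATOMIC_ACQUIRE);  // READ_MEM_BARRIER: keeps the flag
      return payload;                           // load before the payload load
    }
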
@@ -81,4 +77,25 @@
   }
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+};
+
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
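
The specializations added above attach the ordering to the access itself via the __atomic builtins instead of pairing a plain access with a separate fence; on AArch64 this lets the compiler emit the load-acquire/store-release instructions (ldar/stlr) rather than a plain access plus a dmb. The template parameters X_ACQUIRE, RELEASE_X and RELEASE_X_FENCE are HotSpot's ordered-access tags, so these structs are what operations like OrderAccess::load_acquire and release_store_fence ultimately dispatch to on this platform. A minimal standalone sketch, not JDK code, of the three access shapes using the same builtins:

    // Hypothetical free-function equivalents of the three specializations.
    template <typename T>
    T load_acquire(const volatile T* p) {
      T data;
      __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE);  // ldar
      return data;
    }

    template <typename T>
    void release_store(volatile T* p, T v) {
      __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE);    // stlr
    }

    template <typename T>
    void release_store_fence(volatile T* p, T v) {
      release_store(p, v);
      __sync_synchronize();  // full barrier after the store, so it is also
                             // ordered against subsequent plain loads
    }

The trailing full barrier in the RELEASE_X_FENCE variant exists because a store-release alone only orders the accesses that precede it; a later plain load can still be hoisted above the stlr, which is exactly the memory_order_conservative concern the patchwork link at the top of the file discusses.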