src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
changeset 50029 ea0a16ba6ac0
parent 47634 6a0c42c40cd1
child 53244 9807daeb47c4
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon May 07 18:23:59 2018 +0800
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon May 07 12:28:11 2018 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,79 +77,105 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
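+// Emit the fence required in front of an atomic RMW access: lwsync for
+// release semantics, a full sync for the conservative default, nothing
+// for relaxed and acquire.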
+inline void pre_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_acquire: break;
+    case memory_order_release:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
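+// Emit the fence required behind an atomic RMW access: isync for acquire
+// semantics, a full sync for the conservative default, nothing for
+// relaxed and release.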
+inline void post_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_release: break;
+    case memory_order_acquire:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+
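For readers who think in C++11 terms, the two switches reproduce the canonical C++-on-Power mapping, except that the conservative default fences with a full sync on both sides, which is stronger than the usual seq_cst lowering (sync before, isync after). A minimal standalone sketch of the expected code shapes; illustrative only, HotSpot does not use <atomic> here:

  #include <atomic>

  int fetch_add_orders(std::atomic<int>& a) {
    // Comments give the usual PPC64 lowering around the larx/stcx. loop.
    int r = a.fetch_add(1, std::memory_order_relaxed);  // bare loop
    r += a.fetch_add(1, std::memory_order_acquire);     // loop; isync
    r += a.fetch_add(1, std::memory_order_release);     // lwsync; loop
    r += a.fetch_add(1, std::memory_order_acq_rel);     // lwsync; loop; isync
    r += a.fetch_add(1, std::memory_order_seq_cst);     // sync; loop; isync
    return r;
  }
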
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: lwarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: ldarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
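
With the order parameter threaded through both PlatformAdd specializations, call sites can opt out of the default barriers. A hedged sketch, assuming the companion atomic.hpp change in this changeset gives the public Atomic::add a defaulted conservative order; 'counter' is illustrative:

  volatile int32_t counter = 0;

  int32_t a = Atomic::add(1, &counter);                        // default: sync; loop; sync
  int32_t b = Atomic::add(1, &counter, memory_order_relaxed);  // bare lwarx/stwcx. loop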
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
     /* atomic loop */
     "1:                                                 \n"
     "   lwarx   %[old_value], %[dest], %[zero]          \n"
     "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -165,13 +191,16 @@
       "memory"
     );
 
+  post_membar(order);
+
   return old_value;
 }
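
The note above about xchg not implying acquire is why lock-style callers must request it explicitly. A hedged sketch of a test-and-set lock over this primitive; SpinLock and its field are illustrative, not HotSpot code:

  struct SpinLock {
    volatile int32_t _locked;  // 0 = free, 1 = held

    void lock() {
      // post_membar(memory_order_acquire) emits isync after a successful
      // stwcx., so the critical section cannot float above the lock.
      while (Atomic::xchg(1, &_locked, memory_order_acquire) != 0) { /* spin */ }
    }

    void unlock() {
      // pre_membar(memory_order_release) emits lwsync before the store, so
      // the critical section cannot float below the unlock. (A plain release
      // store would suffice; xchg keeps the example on one primitive.)
      Atomic::xchg(0, &_locked, memory_order_release);
    }
  };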
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -179,16 +208,14 @@
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
     /* atomic loop */
     "1:                                                 \n"
     "   ldarx   %[old_value], %[dest], %[zero]          \n"
     "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -204,25 +231,9 @@
       "memory"
     );
 
-  return old_value;
-}
+  post_membar(order);
 
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
+  return old_value;
 }
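
Net effect of the new helpers on emitted fences: before this change, add was hard-wired to lwsync ... isync, xchg to lwsync ... sync, and the removed cmpxchg helpers used a full sync on both sides for every order except relaxed. All three operations now share one mapping:

  order                    pre_membar   post_membar
  memory_order_relaxed     (none)       (none)
  memory_order_acquire     (none)       isync
  memory_order_release     lwsync       (none)
  memory_order_acq_rel     lwsync       isync
  conservative (default)   sync         sync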
 
 template<>
@@ -230,7 +241,7 @@
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -251,7 +262,7 @@
 
   unsigned int old_value, value32;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -290,7 +301,7 @@
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
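
The body of the 1-byte specialization is mostly elided by the diff context; it emulates a byte CAS with the 4-byte lwarx/stwcx. pair on the enclosing aligned word (hence the unsigned int old_value, value32 locals). A hedged standalone sketch of that technique in plain C++, with the ll/sc loop abstracted behind a caller-supplied word-sized CAS; every name here is illustrative:

  #include <stdint.h>

  // word_cas must atomically replace *w with nv if *w == cv, and return the
  // observed old value (this is what the lwarx/stwcx. loop above provides).
  typedef uint32_t (*word_cas_fn)(volatile uint32_t* w, uint32_t cv, uint32_t nv);

  inline uint8_t byte_cas(volatile uint8_t* p, uint8_t cmp, uint8_t val,
                          word_cas_fn word_cas) {
    volatile uint32_t* word = (volatile uint32_t*)((uintptr_t)p & ~(uintptr_t)3);
  #if defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    const int shift = 8 * (int)((uintptr_t)p & 3);
  #else
    const int shift = 8 * (int)(3 - ((uintptr_t)p & 3));
  #endif
    const uint32_t mask = 0xFFu << shift;
    for (;;) {
      uint32_t old_word = *word;
      uint8_t old_byte = (uint8_t)((old_word & mask) >> shift);
      if (old_byte != cmp) return old_byte;             // compare failed
      uint32_t new_word = (old_word & ~mask) | ((uint32_t)val << shift);
      if (word_cas(word, old_word, new_word) == old_word) return cmp;  // success
      // a neighboring byte changed under us; reload and retry
    }
  }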
@@ -300,7 +311,7 @@
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -310,7 +321,7 @@
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -340,7 +351,7 @@
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
@@ -350,7 +361,7 @@
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -360,7 +371,7 @@
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -390,7 +401,7 @@
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
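
A usage sketch of the cmpxchg specializations with their default two-way barrier, assuming the jdk-era signature Atomic::cmpxchg(exchange_value, dest, compare_value, order); atomic_max is illustrative, not HotSpot code:

  inline int64_t atomic_max(volatile int64_t* dest, int64_t value) {
    int64_t old = *dest;
    while (old < value) {
      // Conservative default: pre_membar/post_membar bracket the ldarx/stdcx.
      // loop with full syncs, the two-way fence the comments above describe.
      int64_t prev = Atomic::cmpxchg(value, dest, old);
      if (prev == old) break;  // we installed value
      old = prev;              // lost the race; re-examine the new maximum
    }
    return old;
  }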