hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
changeset 35063 cb24277be2e7
parent 25715 d5a8dbdc5150
child 35594 cc13089c6327
--- a/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Fri Dec 11 09:08:08 2015 +0100
+++ b/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu Dec 10 15:27:16 2015 +0100
@@ -291,6 +291,77 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  // Using 32 bit internally: lwarx/stwcx. operate on aligned words, so
+  // work on the 32-bit word that contains the addressed byte.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
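+  // The shift moves the addressed byte into the least significant byte of
+  // the word; which word byte that is depends on endianness.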
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
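+  // If the target byte still equals compare_value, xoring the word with
+  // xor_value replaces exactly that byte with exchange_value and leaves
+  // the other three bytes unchanged.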
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard: exit early if the byte already differs */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* acquire */
+    strasm_sync
+    /* exit: old_value holds the byte last observed at dest */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jbyte)(unsigned char)old_value;
+}
+
 inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across