hotspot/src/share/vm/runtime/atomic.hpp
changeset 40655 9f644073d3a0
parent 39404 d0ad5220e91c
child 40669 252f9d8272af
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Sun Aug 21 06:18:09 2016 +0200
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Sun Aug 21 20:56:37 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
   memory_order_relaxed,
@@ -119,24 +120,107 @@
   inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
 };
 
-// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
-// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
-// achieve is to place your short value next to another short value, which doesn't need atomic ops.
-//
-// Example
-//  ATOMIC_SHORT_PAIR(
-//    volatile short _refcount,  // needs atomic operation
-//    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
-//  );
+// Platform-specific inline definitions - these must come before the shared definitions below
+
+#include OS_CPU_HEADER(atomic)
 
-#ifdef VM_LITTLE_ENDIAN
-  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
-    non_atomic_decl;                                       \
-    atomic_decl
-#else
-  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
-    atomic_decl;                                           \
-    non_atomic_decl
+// Shared inline definitions
+
+// The size_t operations below simply cast to the pointer-sized *_ptr
+// operations, which requires size_t and uintptr_t to have the same width.
+#if (SIZE_MAX != UINTPTR_MAX)
+#error size_t is not the same width as a pointer - interesting platform, but no shared implementation here
 #endif
 
+inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
+  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+}
+
+inline void Atomic::inc(volatile size_t* dest) {
+  inc_ptr((volatile intptr_t*) dest);
+}
+
+inline void Atomic::dec(volatile size_t* dest) {
+  dec_ptr((volatile intptr_t*) dest);
+}
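+
+// Usage sketch for the size_t wrappers above (the counter name is
+// illustrative, not part of this interface):
+//
+//   volatile size_t _live_objects = 0;
+//   Atomic::inc(&_live_objects);                          // forwards to inc_ptr()
+//   size_t total = Atomic::add((size_t)n, &_live_objects); // forwards to add_ptr()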
+
+#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+/*
+ * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
+ * in terms of jint-sized cmpxchg. Platforms may override it by providing their own inline
+ * definition and defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE, in which case the platform-specific
+ * implementation is used instead.
+ */
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand, cmpxchg_memory_order order)
+{
+  assert(sizeof(jbyte) == 1, "assumption.");
+  uintptr_t dest_addr = (uintptr_t)dest;
+  uintptr_t offset = dest_addr % sizeof(jint);
+  volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
+  jint cur = *dest_int;
+  jbyte* cur_as_bytes = (jbyte*)(&cur);
+  jint new_val = cur;
+  jbyte* new_val_as_bytes = (jbyte*)(&new_val);
+  new_val_as_bytes[offset] = exchange_value;
+  while (cur_as_bytes[offset] == comparand) {
+    jint res = cmpxchg(new_val, dest_int, cur, order);
+    if (res == cur) break;
+    cur = res;
+    new_val = cur;
+    new_val_as_bytes[offset] = exchange_value;
+  }
+  return cur_as_bytes[offset];
+}
+#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
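+
+// Usage sketch for the byte-sized cmpxchg above (the field name is
+// hypothetical). The emulation reads the naturally aligned jint containing
+// the byte, splices the new byte into a copy, and installs the copy with a
+// jint-sized cmpxchg, retrying only while the target byte still equals the
+// comparand:
+//
+//   volatile jbyte _claimed = 0;
+//   jbyte prev = Atomic::cmpxchg((jbyte)1, &_claimed, (jbyte)0);
+//   bool won = (prev == 0);  // cmpxchg returns the previous byte value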
+
+inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
+                                volatile unsigned int* dest, unsigned int compare_value,
+                                cmpxchg_memory_order order) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
+                                       (jint)compare_value, order);
+}
+
+inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
+  jlong old = load(dest);
+  jlong new_value = old + add_value;
+  while (old != cmpxchg(new_value, dest, old)) {
+    old = load(dest);
+    new_value = old + add_value;
+  }
+  return new_value;  // add() returns the updated value, matching the other overloads
+}
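+
+// Usage sketch for the jlong add above (the counter name is hypothetical);
+// the CAS loop retries whenever another thread updates *dest between the
+// load and the cmpxchg, and the call returns the updated value:
+//
+//   volatile jlong _total_allocated = 0;
+//   jlong now_allocated = Atomic::add((jlong)bytes, &_total_allocated);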
+
+inline void Atomic::inc(volatile short* dest) {
+  // Most platforms do not support atomic increment on a 2-byte value. However,
+  // if the value occupies the most significant 16 bits of an aligned 32-bit
+  // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
+  //
+  // The least significant 16 bits of this 32-bit word are never affected, even
+  // if the 16-bit value overflows or underflows.
+  //
+  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
+#ifdef VM_LITTLE_ENDIAN
+  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+  (void)Atomic::add(0x10000, (volatile int*)(dest-1));
+#else
+  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+  (void)Atomic::add(0x10000, (volatile int*)(dest));
+#endif
+}
+
+inline void Atomic::dec(volatile short* dest) {
+#ifdef VM_LITTLE_ENDIAN
+  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+  (void)Atomic::add(-0x10000, (volatile int*)(dest-1));
+#else
+  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+  (void)Atomic::add(-0x10000, (volatile int*)(dest));
+#endif
+}
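+
+// Example: pair the short that needs atomic updates with one that does not,
+// so that the atomic one occupies the upper 16 bits of an aligned 32-bit word:
+//
+//   ATOMIC_SHORT_PAIR(
+//     volatile short _refcount,  // needs atomic operation
+//     unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
+//   );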
+
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP