8186166: Generalize Atomic::cmpxchg with templates
author eosterlund
Wed, 23 Aug 2017 14:01:17 +0200
changeset 46958 a13bd8c6b7a2
parent 46953 39063b484ec2
child 46959 1863b25339a9
8186166: Generalize Atomic::cmpxchg with templates
Reviewed-by: dholmes, coleenp
Contributed-by: kim.barrett@oracle.com
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp
hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp
hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp
hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp
hotspot/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp
hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp
hotspot/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp
hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp
hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp
hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp
hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp
hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp
hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp
hotspot/src/share/vm/aot/aotCodeHeap.cpp
hotspot/src/share/vm/aot/aotCodeHeap.hpp
hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp
hotspot/src/share/vm/gc/shared/workgroup.cpp
hotspot/src/share/vm/metaprogramming/isRegisteredEnum.hpp
hotspot/src/share/vm/metaprogramming/primitiveConversions.hpp
hotspot/src/share/vm/oops/oop.inline.hpp
hotspot/src/share/vm/oops/oopsHierarchy.hpp
hotspot/src/share/vm/runtime/atomic.hpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/utilities/bitMap.cpp
hotspot/src/share/vm/utilities/bitMap.hpp
hotspot/src/share/vm/utilities/bitMap.inline.hpp
hotspot/test/native/metaprogramming/test_isRegisteredEnum.cpp
hotspot/test/native/metaprogramming/test_primitiveConversions.cpp
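
For orientation before the per-file hunks: the change removes the per-type cmpxchg overloads (jbyte, jint, jlong) and the cmpxchg_ptr variants from every platform header and replaces them with specializations of an Atomic::PlatformCmpxchg<byte_size> function object, selected by operand size. The shared dispatch lives in runtime/atomic.hpp, whose hunks are not part of this excerpt, so the following is only a minimal sketch of the pattern the platform specializations plug into (the real shared code also handles mixed argument types, pointers, and registered enums):

// Sketch only; PlatformCmpxchg is declared inside class Atomic and
// defined out-of-line, like the specializations in the hunks below.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// Simplified dispatch, assuming same-typed integral arguments.
template<typename T>
inline T Atomic::cmpxchg(T exchange_value,
                         T volatile* dest,
                         T compare_value,
                         cmpxchg_memory_order order) {
  return PlatformCmpxchg<sizeof(T)>()(exchange_value, dest,
                                      compare_value, order);
}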
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -952,7 +952,7 @@
   if (now <= prev) {
     return prev;   // same or retrograde time;
   }
-  const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
+  const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
   assert(obsv >= prev, "invariant");   // Monotonicity
   // If the CAS succeeded then we're done and return "now".
   // If the CAS failed and the observed value "obsv" is >= now then
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -1197,7 +1197,7 @@
   if (now <= prev) {
     return prev;   // same or retrograde time;
   }
-  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
+  const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
   assert(obsv >= prev, "invariant");   // Monotonicity
   // If the CAS succeeded then we're done and return "now".
   // If the CAS failed and the observed value "obsv" is >= now then
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -306,8 +306,13 @@
   }
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
@@ -368,16 +373,22 @@
 
   cmpxchg_post_membar(order);
 
-  return (jbyte)(unsigned char)old_value;
+  return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
@@ -412,16 +423,22 @@
 
   cmpxchg_post_membar(order);
 
-  return (jint) old_value;
+  return old_value;
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
@@ -456,15 +473,7 @@
 
   cmpxchg_post_membar(order);
 
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+  return old_value;
 }
 
 #undef strasm_sync
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
 #define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
 
-#include "runtime/os.hpp"
-
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
@@ -81,8 +79,13 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
@@ -90,7 +93,13 @@
   return exchange_value;
 }
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
                     : "r" (exchange_value), "a" (compare_value), "r" (dest)
@@ -137,7 +146,13 @@
   return exchange_value;
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
@@ -145,14 +160,6 @@
   return exchange_value;
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
 #else // !AMD64
@@ -184,16 +191,14 @@
   void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 inline jlong Atomic::load(const volatile jlong* src) {
--- a/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -57,9 +57,9 @@
 /* Perform an atomic compare and swap: if the current value of `*PTR'
    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
    `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
+static inline int m68k_compare_and_swap(int newval,
+                                        volatile int *ptr,
+                                        int oldval) {
   for (;;) {
       int prev = *ptr;
       if (prev != oldval)
@@ -118,9 +118,9 @@
 /* Perform an atomic compare and swap: if the current value of `*PTR'
    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
    `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
+static inline int arm_compare_and_swap(int newval,
+                                       volatile int *ptr,
+                                       int oldval) {
   for (;;) {
       int prev = *ptr;
       if (prev != oldval)
@@ -267,55 +267,38 @@
                            (volatile intptr_t*) dest);
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
+  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 #else
 #ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 #else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) {
   volatile jlong dest;
   os::atomic_copy64(src, &dest);
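
Several ports in this changeset (bsd_zero above, and linux_arm, linux_s390, linux_sparc, linux_zero, solaris_sparc below) have no 1-byte compare-and-swap primitive and instead declare PlatformCmpxchg<1> in terms of Atomic::CmpxchgByteUsingInt. Its definition is shared code outside this excerpt; the sketch below shows the assumed emulation strategy: loop a 4-byte cmpxchg on the aligned word containing the byte, substituting only the target byte.

// Assumed shape of Atomic::CmpxchgByteUsingInt; illustrative only.
template<typename T>
T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                          T volatile* dest,
                                          T compare_value,
                                          cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value  = compare_value;
  // The aligned 32-bit word containing *dest, and the byte's offset in it
  // (indexing bytes in memory order keeps this endian-neutral).
  volatile uint32_t* aligned_dest =
    reinterpret_cast<volatile uint32_t*>(uintptr_t(dest) & ~uintptr_t(3));
  size_t offset = uintptr_t(dest) & 3;
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
  while (cur_as_bytes[offset] == canon_compare_value) {
    uint32_t swap_value = cur;
    reinterpret_cast<uint8_t*>(&swap_value)[offset] = canon_exchange_value;
    uint32_t res = cmpxchg(swap_value, aligned_dest, cur, order);
    if (res == cur) break;  // success: our byte was exchanged
    cur = res;              // a neighboring byte changed; retry while ours still matches
  }
  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}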
--- a/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -85,9 +85,13 @@
                            (volatile intptr_t*) dest);
 }
 
-template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
-                                        T compare_value, cmpxchg_memory_order order)
-{
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
+                                                        T volatile* dest,
+                                                        T compare_value,
+                                                        cmpxchg_memory_order order) const {
+  STATIC_ASSERT(byte_size == sizeof(T));
   if (order == memory_order_relaxed) {
     T value = compare_value;
     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
@@ -98,17 +102,6 @@
   }
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
 
@@ -139,24 +132,6 @@
   return res;
 }
 
-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
-{
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
 #endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
--- a/hotspot/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -200,9 +200,38 @@
 
 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
+
+#ifndef AARCH64
+
+inline jint reorder_cmpxchg_func(jint exchange_value,
+                                 jint volatile* dest,
+                                 jint compare_value) {
+  // Warning:  Arguments are swapped to avoid moving them for kernel call
+  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
+}
+
+inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
+                                       jlong volatile* dest,
+                                       jlong compare_value) {
+  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+  // Warning:  Arguments are swapped to avoid moving them for kernel call
+  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
+}
+
+#endif // !AARCH64
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef AARCH64
-  jint rv;
+  T rv;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -220,14 +249,19 @@
     : "memory");
   return rv;
 #else
-  // Warning:  Arguments are swapped to avoid moving them for kernel call
-  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
+  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 #endif
 }
 
-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
 #ifdef AARCH64
-  jlong rv;
+  T rv;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -245,21 +279,8 @@
     : "memory");
   return rv;
 #else
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
-  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
+  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 #endif
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-#ifdef AARCH64
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-#else
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-#endif
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
 #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
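
Ports that keep a legacy per-type helper (the argument-reordering kernel wrappers above, _Atomic_cmpxchg_long on 32-bit x86, the Windows stub functions) now funnel it through cmpxchg_using_helper<Type>. That template is defined in the shared runtime/atomic.hpp hunks, which this excerpt does not include; judging from the call sites, it presumably adapts the typed helper with value-preserving casts in both directions, roughly:

// Assumed shape, reconstructed from the call sites in this changeset.
template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}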
--- a/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -306,8 +306,13 @@
   }
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
@@ -368,16 +373,22 @@
 
   cmpxchg_post_membar(order);
 
-  return (jbyte)(unsigned char)old_value;
+  return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
@@ -412,16 +423,22 @@
 
   cmpxchg_post_membar(order);
 
-  return (jint) old_value;
+  return old_value;
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
@@ -456,15 +473,7 @@
 
   cmpxchg_post_membar(order);
 
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+  return old_value;
 }
 
 #undef strasm_sync
--- a/hotspot/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -478,8 +478,18 @@
 // function is performed before the operand is fetched and again after the
 // operation is completed."
 
-jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) {
-  unsigned long old;
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
+                                                T volatile* dest,
+                                                T cmp_val,
+                                                cmpxchg_memory_order unused) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T old;
 
   __asm__ __volatile__ (
     "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
@@ -493,11 +503,17 @@
     : "cc", "memory"
   );
 
-  return (jint)old;
+  return old;
 }
 
-jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) {
-  unsigned long old;
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
+                                                T volatile* dest,
+                                                T cmp_val,
+                                                cmpxchg_memory_order unused) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T old;
 
   __asm__ __volatile__ (
     "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
@@ -511,15 +527,7 @@
     : "cc", "memory"
   );
 
-  return (jlong)old;
-}
-
-void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) {
-  return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
-}
-
-intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) {
-  return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
+  return old;
 }
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -121,9 +121,18 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  jint rv;
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T rv;
   __asm__ volatile(
     " cas    [%2], %3, %0"
     : "=r" (rv)
@@ -132,8 +141,14 @@
   return rv;
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  jlong rv;
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T rv;
   __asm__ volatile(
     " casx   [%2], %3, %0"
     : "=r" (rv)
@@ -142,18 +157,4 @@
   return rv;
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  intptr_t rv;
-  __asm__ volatile(
-    " casx    [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-  return rv;
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
-}
-
 #endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
--- a/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
 #define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
 
-#include "runtime/os.hpp"
-
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
@@ -81,8 +79,13 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile ("lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
@@ -90,7 +93,13 @@
   return exchange_value;
 }
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ("lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
                     : "r" (exchange_value), "a" (compare_value), "r" (dest)
@@ -137,7 +146,13 @@
   return exchange_value;
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order /* order */) const {
+  STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
@@ -145,14 +160,6 @@
   return exchange_value;
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
 #else // !AMD64
@@ -184,16 +191,14 @@
   void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 inline jlong Atomic::load(const volatile jlong* src) {
--- a/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -57,9 +57,9 @@
 /* Perform an atomic compare and swap: if the current value of `*PTR'
    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
    `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
+static inline int m68k_compare_and_swap(int newval,
+                                        volatile int *ptr,
+                                        int oldval) {
   for (;;) {
       int prev = *ptr;
       if (prev != oldval)
@@ -118,9 +118,9 @@
 /* Perform an atomic compare and swap: if the current value of `*PTR'
    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
    `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
+static inline int arm_compare_and_swap(int newval,
+                                       volatile int *ptr,
+                                       int oldval) {
   for (;;) {
       int prev = *ptr;
       if (prev != oldval)
@@ -261,55 +261,38 @@
                            (volatile intptr_t*) dest);
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
+  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 #else
 #ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 #else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) {
   volatile jlong dest;
   os::atomic_copy64(src, &dest);
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
 
-#include "runtime/os.hpp"
-
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
@@ -64,10 +62,6 @@
 extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
 extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
 
-extern "C" jint     _Atomic_cas32(jint     exchange_value, volatile jint*     dest, jint     compare_value);
-extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
-extern "C" jlong    _Atomic_casl (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
-
 extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
 extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
 
@@ -97,22 +91,40 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+// No direct support for cmpxchg of bytes; emulate using int.
+template<>
+struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cas32(exchange_value, dest, compare_value);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T rv;
+  __asm__ volatile(
+    " cas    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  // Return 64 bit value in %o0
-  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cas64(exchange_value, dest, compare_value);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T rv;
+  __asm__ volatile(
+    " casx   [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
 }
 
 #endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Wed Aug 23 14:01:17 2017 +0200
@@ -73,74 +73,6 @@
         .end
 
 
-  // Support for jint Atomic::cmpxchg(jint           exchange_value,
-  //                                  volatile jint* dest,
-  //                                  jint           compare_value)
-  //
-  // Arguments:
-  //      exchange_value: O0
-  //      dest:           O1
-  //      compare_value:  O2
-  //
-  // Results:
-  //     O0: the value previously stored in dest
-
-        .inline _Atomic_cas32, 3
-        .volatile
-        cas     [%o1], %o2, %o0
-        .nonvolatile
-        .end
-
-
-  // Support for intptr_t Atomic::cmpxchg_ptr(intptr_t           exchange_value,
-  //                                          volatile intptr_t* dest,
-  //                                          intptr_t           compare_value)
-  //
-  // 64-bit
-  //
-  // Arguments:
-  //      exchange_value: O0
-  //      dest:           O1
-  //      compare_value:  O2
-  //
-  // Results:
-  //     O0: the value previously stored in dest
-
-        .inline _Atomic_cas64, 3
-        .volatile
-        casx    [%o1], %o2, %o0
-        .nonvolatile
-        .end
-
-
-  // Support for jlong Atomic::cmpxchg(jlong           exchange_value,
-  //                                   volatile jlong* dest,
-  //                                   jlong           compare_value)
-  //
-  // 32-bit calling conventions
-  //
-  // Arguments:
-  //      exchange_value: O1:O0
-  //      dest:           O2
-  //      compare_value:  O4:O3
-  //
-  // Results:
-  //     O1:O0: the value previously stored in dest
-
-        .inline _Atomic_casl, 3
-        .volatile
-        sllx    %o0, 32, %o0
-        srl     %o1, 0, %o1
-        or      %o0,%o1,%o0
-        sllx    %o3, 32, %o3
-        srl     %o4, 0, %o4
-        or      %o3,%o4,%o3
-        casx    [%o2], %o3, %o0
-        srl     %o0, 0, %o1
-        srlx    %o0, 32, %o0
-        .nonvolatile
-        .end
-
   // Support for jlong Atomic::load and Atomic::store on v9.
   //
   // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
--- a/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -25,8 +25,6 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
 #define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
 
-#include "runtime/os.hpp"
-
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
 inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
 inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
@@ -49,8 +47,7 @@
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
 
-// For Sun Studio - implementation is in solaris_x86_[32/64].il.
-// For gcc - implementation is just below.
+// For Sun Studio - implementation is in solaris_x86_64.il.
 
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest);
@@ -71,21 +68,51 @@
   return _Atomic_xchg(exchange_value, dest);
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value);
+// Not using cmpxchg_using_helper here, because some configurations of
+// Solaris compiler don't deal well with passing a "defined in .il"
+// function as an argument.  We *should* switch to using gcc-style
+// inline assembly, but attempting to do so with Studio 12.4 ran into
+// segfaults.
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value),
+                         reinterpret_cast<jbyte volatile*>(dest),
+                         PrimitiveConversions::cast<jbyte>(compare_value)));
 }
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg(exchange_value, dest, compare_value);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    _Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value),
+                    reinterpret_cast<jint volatile*>(dest),
+                    PrimitiveConversions::cast<jint>(compare_value)));
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    _Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
+                         reinterpret_cast<jlong volatile*>(dest),
+                         PrimitiveConversions::cast<jlong>(compare_value)));
 }
 
-
-#ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
@@ -107,59 +134,6 @@
   return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-extern "C" void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
-
-inline jlong Atomic::load(const volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-
 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
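
The Solaris port above bridges between the caller's T and the .il helpers' jbyte/jint/jlong with PrimitiveConversions::cast (from metaprogramming/primitiveConversions.hpp in the file list) rather than the old C-style casts. A hypothetical one-liner to illustrate the assumed semantics, a value-preserving conversion between same-sized types:

unsigned char raw = 0xFFu;
// Same bit pattern, explicit, and checked to be a same-size conversion.
signed char sc = PrimitiveConversions::cast<signed char>(raw);  // sc == -1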
--- a/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -109,26 +109,22 @@
   return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
-}
+#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
+  template<>                                                            \
+  template<typename T>                                                  \
+  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
+                                                         T volatile* dest, \
+                                                         T compare_value, \
+                                                         cmpxchg_memory_order order) const { \
+    STATIC_ASSERT(ByteSize == sizeof(T));                               \
+    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+  }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
+DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
+DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
+DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
+#undef DEFINE_STUB_CMPXCHG
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
@@ -201,8 +197,13 @@
   return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
@@ -212,7 +213,13 @@
   }
 }
 
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
@@ -222,7 +229,13 @@
   }
 }
 
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
   jint ex_lo  = (jint)exchange_value;
   jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
   jint cmp_lo = (jint)compare_value;
@@ -241,14 +254,6 @@
   }
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
 inline jlong Atomic::load(const volatile jlong* src) {
   volatile jlong dest;
   volatile jlong* pdest = &dest;
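
To make the stub macro concrete: expanding DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func) from the AMD64 section above by hand yields

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return cmpxchg_using_helper<jint>(os::atomic_cmpxchg_func, exchange_value, dest, compare_value);
}

which matches the hand-written specializations in the other x86 headers, except that the body delegates to the os:: stub.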
--- a/hotspot/src/share/vm/aot/aotCodeHeap.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/aot/aotCodeHeap.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -316,7 +316,7 @@
   AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id);
   assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
   _code_to_aot[code_id]._aot = aot; // Should set this first
-  if (Atomic::cmpxchg(in_use, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) {
+  if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
     _code_to_aot[code_id]._aot = NULL; // Clean
   } else { // success
     // Publish method
@@ -378,7 +378,7 @@
     AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
     assert(_code_to_aot[code_id]._aot  == NULL, "should not be initialized");
     _code_to_aot[code_id]._aot  = aot;
-    if (Atomic::cmpxchg(in_use, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) {
+    if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
       fatal("stab '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
     }
     // Adjust code buffer boundaries only for stubs because they are last in the buffer.
@@ -649,7 +649,7 @@
     for (int i = 0; i < methods_cnt; ++i) {
       int code_id = indexes[i];
       // Invalidate aot code.
-      if (Atomic::cmpxchg(invalid, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) {
+      if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
         if (_code_to_aot[code_id]._state == in_use) {
           AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
           assert(aot != NULL, "aot should be set");
--- a/hotspot/src/share/vm/aot/aotCodeHeap.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/aot/aotCodeHeap.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -26,6 +26,8 @@
 
 #include "aot/aotCompiledMethod.hpp"
 #include "classfile/symbolTable.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isRegisteredEnum.hpp"
 #include "oops/metadata.hpp"
 #include "oops/method.hpp"
 
@@ -35,6 +37,8 @@
   invalid = 2  // AOT code is invalidated because dependencies failed
 };
 
+template<> struct IsRegisteredEnum<CodeState> : public TrueType {};
+
 typedef struct {
   AOTCompiledMethod* _aot;
   CodeState _state; // State change cases: not_set->in_use, not_set->invalid
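
Registering CodeState is what lets the cmpxchg call sites above drop their (jint*) casts: the templated Atomic::cmpxchg only accepts an enum that has an IsRegisteredEnum specialization. A minimal sketch with a hypothetical enum (not HotSpot code; assumes runtime/atomic.hpp and the headers above are included):

enum DemoState { demo_not_set = 0, demo_in_use = 1 };
template<> struct IsRegisteredEnum<DemoState> : public TrueType {};

static volatile DemoState _demo_state = demo_not_set;

// All three arguments are of type DemoState, so the integral/enum
// CmpxchgImpl specialization in atomic.hpp handles the call directly.
static bool demo_try_claim() {
  return Atomic::cmpxchg(demo_in_use, &_demo_state, demo_not_set) == demo_not_set;
}
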
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -593,9 +593,8 @@
 
 inline bool ParallelCompactData::RegionData::claim()
 {
-  const int los = (int) live_obj_size();
-  const int old = Atomic::cmpxchg(dc_claimed | los,
-                                  (volatile int*) &_dc_and_los, los);
+  const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
+  const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
   return old == los;
 }
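
The claim protocol above hinges on one CAS: a region whose _dc_and_los still equals its live-object size is unclaimed, and exactly one thread flips it to dc_claimed | los. A single-threaded model of that race, with a hypothetical flag value and a stand-in for the CAS (a sketch, not HotSpot code):

#include <assert.h>
#include <stdint.h>

typedef uint32_t region_sz_t;
static const region_sz_t dc_claimed = 0x08000000u;  // hypothetical flag bit

// Stand-in for Atomic::cmpxchg: returns the old value, and stores
// new_val only when *dest == cmp_val.
static region_sz_t model_cmpxchg(region_sz_t new_val,
                                 region_sz_t* dest,
                                 region_sz_t cmp_val) {
  region_sz_t old = *dest;
  if (old == cmp_val) *dest = new_val;
  return old;
}

int main() {
  const region_sz_t los = 0x1234;
  region_sz_t dc_and_los = los;  // unclaimed: just the live size
  // The first claimer observes "los" and wins ...
  assert(model_cmpxchg(dc_claimed | los, &dc_and_los, los) == los);
  // ... a later attempt observes the claimed value and fails.
  assert(model_cmpxchg(dc_claimed | los, &dc_and_los, los) != los);
  return 0;
}
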
 
--- a/hotspot/src/share/vm/gc/shared/workgroup.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,7 @@
   assert(t < _n_tasks, "bad task id.");
   uint old = _tasks[t];
   if (old == 0) {
-    old = Atomic::cmpxchg(1, &_tasks[t], 0);
+    old = Atomic::cmpxchg(1u, &_tasks[t], 0u);
   }
   assert(_tasks[t] == 1, "What else?");
   bool res = old != 0;
@@ -442,15 +442,15 @@
 }
 
 void SubTasksDone::all_tasks_completed(uint n_threads) {
-  jint observed = _threads_completed;
-  jint old;
+  uint observed = _threads_completed;
+  uint old;
   do {
     old = observed;
     observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
   } while (observed != old);
   // If this was the last thread checking in, clear the tasks.
   uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
-  if (observed + 1 == (jint)adjusted_thread_count) {
+  if (observed + 1 == adjusted_thread_count) {
     clear();
   }
 }
@@ -474,8 +474,8 @@
 bool SequentialSubTasksDone::is_task_claimed(uint& t) {
   t = _n_claimed;
   while (t < _n_tasks) {
-    jint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
-    if (res == (jint)t) {
+    uint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
+    if (res == t) {
       return false;
     }
     t = res;
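
The all_tasks_completed loop above is the standard fetch-and-increment built from cmpxchg: keep retrying until the value the increment was based on is the value actually observed in memory. The same pattern as a free-standing sketch (the function name is illustrative; assumes runtime/atomic.hpp):

// Returns the pre-increment value, like an atomic fetch_add(1).
static uint fetch_and_increment(volatile uint* counter) {
  uint observed = *counter;
  uint old;
  do {
    old = observed;
    // Succeeds only if *counter still holds "old"; on failure it
    // returns the newer value, which becomes the next attempt's basis.
    observed = Atomic::cmpxchg(old + 1u, counter, old);
  } while (observed != old);
  return old;
}
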
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/metaprogramming/isRegisteredEnum.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
+#define SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
+
+#include "memory/allocation.hpp"
+#include "metaprogramming/integralConstant.hpp"
+
+// Recognize registered enum types.
+// Registration is by specializing this trait.
+//
+// This is a manual stand-in for the C++11 std::is_enum<T> type trait.
+// It's a lot of work to implement is_enum portably in C++98, so this
+// manual approach is being taken for those enum types we need to
+// distinguish.
+template<typename T>
+struct IsRegisteredEnum : public FalseType {};
+
+#endif // SHARE_VM_METAPROGRAMMING_ISREGISTEREDENUM_HPP
+
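
For comparison, if HotSpot could assume C++11 the manual registration would be unnecessary; a hypothetical sketch of delegating to std::is_enum (not part of this change, unavailable to a C++98 build, and assuming HotSpot's IntegralConstant template):

#include <type_traits>

template<typename T>
struct IsEnum : public IntegralConstant<bool, std::is_enum<T>::value> {};
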
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/metaprogramming/primitiveConversions.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP
+#define SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP
+
+#include "memory/allocation.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isFloatingPoint.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isRegisteredEnum.hpp"
+#include "utilities/debug.hpp"
+
+class PrimitiveConversions : public AllStatic {
+public:
+  // Return a value of type T with the same representation as x.
+  //
+  // T and U must be of the same size.
+  //
+  // At least one of T or U must be an integral type.  The other must
+  // be an integral, floating point, or pointer type.
+  template<typename T, typename U> static T cast(U x);
+
+  // Support thin wrappers over primitive types.
+  // If derived from TrueType, provides representational conversion
+  // from T to some other type.  When true, must provide
+  // - Value: typedef for T.
+  // - Decayed: typedef for decayed type.
+  // - static Decayed decay(T x): return value of type Decayed with
+  //   the same representation as x.
+  // - static T recover(Decayed x): return a value of type T with the
+  //   same representation as x.
+  template<typename T> struct Translate : public FalseType {};
+
+private:
+
+  template<typename T,
+           typename U,
+           bool same_size = sizeof(T) == sizeof(U),
+           typename Enable = void>
+  struct Cast;
+
+  template<typename T, typename U> static T cast_using_union(U x);
+};
+
+// Return an object of type T with the same value representation as x.
+//
+// T and U must be of the same size.  It is expected that one of T and
+// U is an integral type, and the other is an integral type, a
+// (registered) enum type, or a floating point type.
+//
+// This implementation uses the "union trick", which seems to be the
+// best of a bad set of options.  Though technically undefined
+// behavior, it is widely and well supported, producing good code.  In
+// some cases, such as gcc, that support is explicitly documented.
+//
+// Using memcpy is the correct method, but some compilers produce
+// wretched code for that method, even at maximal optimization levels.
+//
+// Using static_cast is only possible for integral and enum types, not
+// for floating point types.  And for integral and enum conversions,
+// static_cast has unspecified or implementation-defined behavior for
+// some cases.  C++11 <type_traits> can be used to avoid most or all
+// of those unspecified or implementation-defined issues, though that
+// may require multi-step conversions.
+//
+// Using reinterpret_cast of references has undefined behavior for
+// many cases, and there is much less empirical basis for its use, as
+// compared to the union trick.
+template<typename T, typename U>
+inline T PrimitiveConversions::cast_using_union(U x) {
+  STATIC_ASSERT(sizeof(T) == sizeof(U));
+  union { T t; U u; };
+  u = x;
+  return t;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// cast<T>(x)
+//
+// Cast<T, U, same_size, Enable>
+
+// Give an informative error if the sizes differ.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<T, U, false> VALUE_OBJ_CLASS_SPEC {
+  STATIC_ASSERT(sizeof(T) == sizeof(U));
+};
+
+// Conversion between integral types.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<
+  T, U, true,
+  typename EnableIf<IsIntegral<T>::value && IsIntegral<U>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(U x) const { return cast_using_union<T>(x); }
+};
+
+// Convert an enum or floating point value to an integer value.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<
+  T, U, true,
+  typename EnableIf<IsIntegral<T>::value &&
+                    (IsRegisteredEnum<U>::value ||
+                     IsFloatingPoint<U>::value)>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(U x) const { return cast_using_union<T>(x); }
+};
+
+// Convert an integer to an enum or floating point value.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<
+  T, U, true,
+  typename EnableIf<IsIntegral<U>::value &&
+                    (IsRegisteredEnum<T>::value ||
+                     IsFloatingPoint<T>::value)>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(U x) const { return cast_using_union<T>(x); }
+};
+
+// Convert a pointer to an integral value.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<
+  T, U*, true,
+  typename EnableIf<IsIntegral<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(U* x) const { return reinterpret_cast<T>(x); }
+};
+
+// Convert an integral value to a pointer.
+template<typename T, typename U>
+struct PrimitiveConversions::Cast<
+  T*, U, true,
+  typename EnableIf<IsIntegral<U>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T* operator()(U x) const { return reinterpret_cast<T*>(x); }
+};
+
+template<typename T, typename U>
+inline T PrimitiveConversions::cast(U x) {
+  return Cast<T, U>()(x);
+}
+
+#endif // SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP
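
A worked round trip through cast (a sketch, not HotSpot code; assumes this header is included and IEEE-754 floats): 1.0f has bit pattern 0x3f800000, so casting to a same-size integer exposes exactly those bits, and casting back recovers the value.

#include <assert.h>
#include <stdint.h>

int main() {
  float f = 1.0f;
  uint32_t bits = PrimitiveConversions::cast<uint32_t>(f);  // union trick
  assert(bits == 0x3f800000u);
  assert(PrimitiveConversions::cast<float>(bits) == 1.0f);
  return 0;
}
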
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -94,7 +94,7 @@
 }
 
 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
-  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
+  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
 }
 
 void oopDesc::init_mark() {
@@ -408,14 +408,14 @@
     narrowOop val = encode_heap_oop(exchange_value);
     narrowOop cmp = encode_heap_oop(compare_value);
 
-    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+    narrowOop old = Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
     // decode old from T to oop
     return decode_heap_oop(old);
   } else {
     if (prebarrier) {
       update_barrier_set_pre((oop*)dest, exchange_value);
     }
-    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+    return Atomic::cmpxchg(exchange_value, (oop*)dest, compare_value);
   }
 }
 
@@ -619,7 +619,7 @@
   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
 
   while (!oldMark->is_marked()) {
-    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
+    curMark = Atomic::cmpxchg(forwardPtrMark, &_mark, oldMark);
     assert(is_forwarded(), "object should have been forwarded");
     if (curMark == oldMark) {
       return NULL;
--- a/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_OOPS_OOPSHIERARCHY_HPP
 #define SHARE_VM_OOPS_OOPSHIERARCHY_HPP
 
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -142,6 +144,15 @@
   operator oop* () const              { return (oop *)obj(); }
 };
 
+template<>
+struct PrimitiveConversions::Translate<oop> : public TrueType {
+  typedef oop Value;
+  typedef oopDesc* Decayed;
+
+  static Decayed decay(Value x) { return x.obj(); }
+  static Value recover(Decayed x) { return oop(x); }
+};
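
The same Translate pattern works for any wrapper whose representation is a single primitive; a hypothetical sketch (not HotSpot code) mirroring the oop specialization above:

// A thin wrapper represented by exactly one int.
class Ticket {
  int _id;
 public:
  explicit Ticket(int id) : _id(id) {}
  int id() const { return _id; }
};

template<>
struct PrimitiveConversions::Translate<Ticket> : public TrueType {
  typedef Ticket Value;
  typedef int Decayed;

  static Decayed decay(Value x) { return x.id(); }
  static Value recover(Decayed x) { return Ticket(x); }
};

With that in place, Atomic::cmpxchg on a Ticket decays to int, runs the 4-byte platform cmpxchg, and recovers a Ticket from the result.
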
+
 #define DEF_OOP(type)                                                      \
    class type##OopDesc;                                                    \
    class type##Oop : public oop {                                          \
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -26,6 +26,11 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isSame.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "metaprogramming/removeCV.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -111,13 +116,132 @@
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  inline static jbyte        cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static jint         cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong        cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static unsigned int cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static intptr_t     cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
+
+  template<typename T, typename D, typename U>
+  inline static D cmpxchg(T exchange_value,
+                          D volatile* dest,
+                          U compare_value,
+                          cmpxchg_memory_order order = memory_order_conservative);
+
+  // Performs an atomic compare of *dest and NULL, and replaces *dest
+  // with value if the comparison succeeded.  Returns true if
+  // the comparison succeeded and the exchange occurred.  This is
+  // often used as part of lazy initialization, as a lock-free
+  // alternative to the Double-Checked Locking Pattern.
+  template<typename T, typename D>
+  inline static bool replace_if_null(T* value, D* volatile* dest,
+                                     cmpxchg_memory_order order = memory_order_conservative);
+
+  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
+                                     volatile intptr_t* dest,
+                                     intptr_t compare_value,
+                                     cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value, dest, compare_value, order);
+  }
+
+  inline static void* cmpxchg_ptr(void* exchange_value,
+                                  volatile void* dest,
+                                  void* compare_value,
+                                  cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value,
+                   reinterpret_cast<void* volatile*>(dest),
+                   compare_value,
+                   order);
+  }
+
+private:
+  // Test whether From is implicitly convertible to To.
+  // From and To must be pointer types.
+  // Note: Provides the limited subset of C++11 std::is_convertible
+  // that is needed here.
+  template<typename From, typename To> struct IsPointerConvertible;
+
+  // Dispatch handler for cmpxchg.  Provides type-based validity
+  // checking and limited conversions around calls to the
+  // platform-specific implementation layer provided by
+  // PlatformCmpxchg.
+  template<typename T, typename D, typename U, typename Enable = void>
+  struct CmpxchgImpl;
+
+  // Platform-specific implementation of cmpxchg.  Support for sizes
+  // of 1, 4, and 8 is required.  The class is a function object that
+  // must be default constructible, with these requirements:
+  //
+  // - dest is of type T*.
+  // - exchange_value and compare_value are of type T.
+  // - order is of type cmpxchg_memory_order.
+  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
+  //
+  // Then
+  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
+  // must be a valid expression, returning a result convertible to T.
+  //
+  // A default definition is provided, which declares a function template
+  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
+  //
+  // For each required size, a platform must either provide an
+  // appropriate definition of that function, or must entirely
+  // specialize the class template for that size.
+  template<size_t byte_size> struct PlatformCmpxchg;
+
+  // Support for platforms that implement some variants of cmpxchg
+  // using a (typically out-of-line) non-template helper function.
+  // The generic arguments passed to PlatformCmpxchg are translated
+  // to the helper function's parameter type, the helper is invoked
+  // on the translated arguments, and the result is translated back.
+  // Type is the parameter / return type of the helper function.
+  template<typename Type, typename Fn, typename T>
+  static T cmpxchg_using_helper(Fn fn,
+                                T exchange_value,
+                                T volatile* dest,
+                                T compare_value);
+
+  // Support platforms that do not provide Read-Modify-Write
+  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
+  // this class.
+public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
+  struct CmpxchgByteUsingInt;
+private:
+};
+
+template<typename From, typename To>
+struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
+  // Determine whether From* is implicitly convertible to To*, using
+  // the "sizeof trick".
+  typedef char yes;
+  typedef char (&no)[2];
+
+  static yes test(To*);
+  static no test(...);
+  static From* test_value;
+
+  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
+};
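
The "sizeof trick" above, restated standalone (a sketch, not HotSpot code): overload resolution picks test(To*) exactly when From* implicitly converts to To*, and the two overloads are distinguished by the sizes of their return types.

template<typename From, typename To>
struct PtrConvertible {
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);   // chosen when From* converts to To*
  static no test(...);    // fallback otherwise
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

struct DemoBase {};
struct DemoDerived : public DemoBase {};

// C++98-style compile-time checks: a negative array size on failure.
typedef char check_up[PtrConvertible<DemoDerived, DemoBase>::value ? 1 : -1];
typedef char check_down[!PtrConvertible<DemoBase, DemoDerived>::value ? 1 : -1];
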
+
+// Define the class before including the platform file, which may
+// specialize the operator definition.  No generic definitions of the
+// operator template's specializations are provided, nor are there any generic
+// specializations of the class.  The platform file is responsible for
+// providing those.
+template<size_t byte_size>
+struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T exchange_value,
+               T volatile* dest,
+               T compare_value,
+               cmpxchg_memory_order order) const;
+};
+
+// Define the class before including the platform file, which may use this
+// as a base class, requiring it be complete.  The definition is later
+// in this file, near the other definitions related to cmpxchg.
+struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T exchange_value,
+               T volatile* dest,
+               T compare_value,
+               cmpxchg_memory_order order) const;
 };
 
 // platform specific in-line definitions - must come before shared definitions
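
For the PlatformCmpxchg contract documented above, a platform's specialization can be a single intrinsic call. A hedged sketch (not from this changeset) using GCC's __sync_val_compare_and_swap, whose full-barrier semantics satisfy any requested order:

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // The intrinsic is a full barrier, so "order" needs no dispatch here.
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
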
@@ -143,61 +267,152 @@
   dec_ptr((volatile intptr_t*) dest);
 }
 
-#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-/*
- * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
- * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
- * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
- * implementation to be used instead.
- */
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value, cmpxchg_memory_order order) {
-  STATIC_ASSERT(sizeof(jbyte) == 1);
-  volatile jint* dest_int =
-      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
-  size_t offset = pointer_delta(dest, dest_int, 1);
-  jint cur = *dest_int;
-  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
+template<typename T, typename D, typename U>
+inline D Atomic::cmpxchg(T exchange_value,
+                         D volatile* dest,
+                         U compare_value,
+                         cmpxchg_memory_order order) {
+  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+}
+
+template<typename T, typename D>
+inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+                                    cmpxchg_memory_order order) {
+  // Presently using a trivial implementation in terms of cmpxchg.
+  // Consider adding platform support, to permit the use of compiler
+  // intrinsics like gcc's __sync_bool_compare_and_swap.
+  D* expected_null = NULL;
+  return expected_null == cmpxchg(value, dest, expected_null, order);
+}
+
+// Handle cmpxchg for integral and enum types.
+//
+// All the involved types must be identical.
+template<typename T>
+struct Atomic::CmpxchgImpl<
+  T, T, T,
+  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest, T compare_value,
+               cmpxchg_memory_order order) const {
+    // Forward to the platform handler for the size of T.
+    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
+                                        dest,
+                                        compare_value,
+                                        order);
+  }
+};
+
+// Handle cmpxchg for pointer types.
+//
+// The destination's type and the compare_value type must be the same,
+// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
+// the compare_value.
+//
+// The exchange_value must be implicitly convertible to the
+// destination's type; it must be type-correct to store the
+// exchange_value in the destination.
+template<typename T, typename D, typename U>
+struct Atomic::CmpxchgImpl<
+  T*, D*, U*,
+  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
+                    IsSame<typename RemoveCV<D>::type,
+                           typename RemoveCV<U>::type>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+                cmpxchg_memory_order order) const {
+    // Allow derived-to-base conversion and the addition of cv-qualifiers.
+    D* new_value = exchange_value;
+    // We don't care what the cv-qualifiers of compare_value are,
+    // but we need a D* to match when calling the platform support.
+    D* old_value = const_cast<D*>(compare_value);
+    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+  }
+};
+
+// Handle cmpxchg for types that have a translator.
+//
+// All the involved types must be identical.
+//
+// This translates the original call into a call on the decayed
+// arguments, and returns the recovered result of that translated
+// call.
+template<typename T>
+struct Atomic::CmpxchgImpl<
+  T, T, T,
+  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest, T compare_value,
+               cmpxchg_memory_order order) const {
+    typedef PrimitiveConversions::Translate<T> Translator;
+    typedef typename Translator::Decayed Decayed;
+    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
+    return Translator::recover(
+      cmpxchg(Translator::decay(exchange_value),
+              reinterpret_cast<Decayed volatile*>(dest),
+              Translator::decay(compare_value),
+              order));
+  }
+};
+
+template<typename Type, typename Fn, typename T>
+inline T Atomic::cmpxchg_using_helper(Fn fn,
+                                      T exchange_value,
+                                      T volatile* dest,
+                                      T compare_value) {
+  STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  return PrimitiveConversions::cast<T>(
+    fn(PrimitiveConversions::cast<Type>(exchange_value),
+       reinterpret_cast<Type volatile*>(dest),
+       PrimitiveConversions::cast<Type>(compare_value)));
+}
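
A sketch of that helper route (the helper's name and out-of-line definition are hypothetical): the generic T arguments are cast to the helper's fixed jint signature, and the jint result is cast back to T.

extern "C" jint demo_cmpxchg_jint(jint, volatile jint*, jint);  // hypothetical

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // Assumes the helper is conservatively fenced, so "order" is not dispatched.
  return cmpxchg_using_helper<jint>(demo_cmpxchg_jint,
                                    exchange_value, dest, compare_value);
}
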
+
+template<typename T>
+inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
+                                                 T volatile* dest,
+                                                 T compare_value,
+                                                 cmpxchg_memory_order order) const {
+  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
+  uint8_t canon_exchange_value = exchange_value;
+  uint8_t canon_compare_value = compare_value;
+  volatile uint32_t* aligned_dest
+    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
+  size_t offset = pointer_delta(dest, aligned_dest, 1);
+  uint32_t cur = *aligned_dest;
+  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
 
   // current value may not be what we are looking for, so force it
   // to that value so the initial cmpxchg will fail if it is different
-  cur_as_bytes[offset] = compare_value;
+  cur_as_bytes[offset] = canon_compare_value;
 
   // always execute a real cmpxchg so that we get the required memory
   // barriers even on initial failure
   do {
     // value to swap in matches current value ...
-    jint new_value = cur;
+    uint32_t new_value = cur;
     // ... except for the one byte we want to update
-    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
+    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
-    jint res = cmpxchg(new_value, dest_int, cur, order);
-    if (res == cur) break; // success
+    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+    if (res == cur) break;      // success
 
-    // at least one jbyte in the jint changed value, so update
-    // our view of the current jint
+    // at least one byte in the int changed value, so update
+    // our view of the current int
     cur = res;
-    // if our jbyte is still as cur we loop and try again
-  } while (cur_as_bytes[offset] == compare_value);
+    // if our byte is still as cur we loop and try again
+  } while (cur_as_bytes[offset] == canon_compare_value);
 
-  return cur_as_bytes[offset];
+  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 }
 
-#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-
 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
-inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                         volatile unsigned int* dest, unsigned int compare_value,
-                         cmpxchg_memory_order order) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                       (jint)compare_value, order);
-}
-
 inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
   // Most platforms do not support atomic add on a 2-byte value. However,
   // if the value occupies the most significant 16 bits of an aligned 32-bit
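
The lazy-initialization idiom replace_if_null is meant for, distilled (a sketch with a hypothetical table; the real use is in the bitMap.cpp hunk below):

static const int* volatile _demo_table = NULL;

static void demo_init_table() {
  int* table = NEW_C_HEAP_ARRAY(int, 256, mtInternal);
  for (int i = 0; i < 256; i++) {
    table[i] = i;  // stand-in for the real initialization
  }
  // Exactly one thread publishes its table; the losers free theirs.
  if (!Atomic::replace_if_null(table, &_demo_table)) {
    FREE_C_HEAP_ARRAY(int, table);
  }
}
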
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -755,9 +755,9 @@
   // Make updating the random seed thread safe.
   while (true) {
     unsigned int seed = _rand_seed;
-    int rand = random_helper(seed);
+    unsigned int rand = random_helper(seed);
     if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) {
-      return rand;
+      return static_cast<int>(rand);
     }
   }
 }
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -210,12 +210,12 @@
   // With a valid range (beg <= end), this test ensures that end != 0, as
   // required by inverted_bit_mask_for_range.  Also avoids an unnecessary write.
   if (beg != end) {
-    intptr_t* pw  = (intptr_t*)word_addr(beg);
-    intptr_t  w   = *pw;
-    intptr_t  mr  = (intptr_t)inverted_bit_mask_for_range(beg, end);
-    intptr_t  nw  = value ? (w | ~mr) : (w & mr);
+    bm_word_t* pw = word_addr(beg);
+    bm_word_t  w  = *pw;
+    bm_word_t  mr = inverted_bit_mask_for_range(beg, end);
+    bm_word_t  nw = value ? (w | ~mr) : (w & mr);
     while (true) {
-      intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
+      bm_word_t res = Atomic::cmpxchg(nw, pw, w);
       if (res == w) break;
       w  = res;
       nw = value ? (w | ~mr) : (w & mr);
@@ -617,7 +617,7 @@
   return true;
 }
 
-BitMap::idx_t* BitMap::_pop_count_table = NULL;
+const BitMap::idx_t* BitMap::_pop_count_table = NULL;
 
 void BitMap::init_pop_count_table() {
   if (_pop_count_table == NULL) {
@@ -626,11 +626,8 @@
       table[i] = num_set_bits(i);
     }
 
-    intptr_t res = Atomic::cmpxchg_ptr((intptr_t)  table,
-                                       (intptr_t*) &_pop_count_table,
-                                       (intptr_t)  NULL_WORD);
-    if (res != NULL_WORD) {
-      guarantee( _pop_count_table == (void*) res, "invariant" );
+    if (!Atomic::replace_if_null(table, &_pop_count_table)) {
+      guarantee(_pop_count_table != NULL, "invariant");
       FREE_C_HEAP_ARRAY(idx_t, table);
     }
   }
--- a/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -114,7 +114,7 @@
   void verify_range(idx_t beg_index, idx_t end_index) const NOT_DEBUG_RETURN;
 
   // Statistics.
-  static idx_t* _pop_count_table;
+  static const idx_t* _pop_count_table;
   static void init_pop_count_table();
   static idx_t num_set_bits(bm_word_t w);
   static idx_t num_set_bits_from_table(unsigned char c);
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Wed Aug 23 10:25:25 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Wed Aug 23 14:01:17 2017 +0200
@@ -49,9 +49,7 @@
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
-                                                      (volatile void*) addr,
-                                                      (void*) old_val);
+    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
     if (cur_val == old_val) {
       return true;      // Success.
     }
@@ -70,9 +68,7 @@
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
-                                                      (volatile void*) addr,
-                                                      (void*) old_val);
+    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
     if (cur_val == old_val) {
       return true;      // Success.
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/metaprogramming/test_isRegisteredEnum.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isRegisteredEnum.hpp"
+
+#include "unittest.hpp"
+
+struct IsRegisteredEnumTest : AllStatic {
+  enum A { A_x, A_y, A_z };
+  enum B { B_x, B_y, B_z };
+};
+
+typedef IsRegisteredEnumTest::A A;
+typedef IsRegisteredEnumTest::B B;
+
+template<> struct IsRegisteredEnum<A> : public TrueType {};
+
+STATIC_ASSERT(!IsRegisteredEnum<int>::value);
+STATIC_ASSERT(IsRegisteredEnum<A>::value);
+STATIC_ASSERT(!IsRegisteredEnum<B>::value);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/native/metaprogramming/test_primitiveConversions.cpp	Wed Aug 23 14:01:17 2017 +0200
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/isSame.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "unittest.hpp"
+#include "utilities/debug.hpp"
+
+struct PrimitiveConversionsTestSupport : AllStatic {
+
+  template<size_t byte_size> struct SignedTypeOfSize;
+  template<size_t byte_size> struct UnsignedTypeOfSize;
+
+  template<typename T> struct Signed;
+  template<typename T> struct Unsigned;
+};
+
+#define DEFINE_CANONICAL_SIGNED_TYPE(T)                                 \
+  template<>                                                            \
+  struct PrimitiveConversionsTestSupport::SignedTypeOfSize<sizeof(T)>   \
+    : public AllStatic                                                  \
+  {                                                                     \
+    typedef T type;                                                     \
+  };
+
+#define DEFINE_CANONICAL_UNSIGNED_TYPE(T)                               \
+  template<>                                                            \
+  struct PrimitiveConversionsTestSupport::UnsignedTypeOfSize<sizeof(T)> \
+    : public AllStatic                                                  \
+  {                                                                     \
+    typedef T type;                                                     \
+  };
+
+#define DEFINE_INTEGER_TYPES_OF_SIZE(NBITS)            \
+  DEFINE_CANONICAL_SIGNED_TYPE(int ## NBITS ## _t)     \
+  DEFINE_CANONICAL_UNSIGNED_TYPE(uint ## NBITS ## _t)
+
+DEFINE_INTEGER_TYPES_OF_SIZE(8)
+DEFINE_INTEGER_TYPES_OF_SIZE(16)
+DEFINE_INTEGER_TYPES_OF_SIZE(32)
+DEFINE_INTEGER_TYPES_OF_SIZE(64)
+
+#undef DEFINE_INTEGER_TYPES_OF_SIZE
+#undef DEFINE_CANONICAL_SIGNED_TYPE
+#undef DEFINE_CANONICAL_UNSIGNED_TYPE
+
+template<typename T>
+struct PrimitiveConversionsTestSupport::Signed
+  : public SignedTypeOfSize<sizeof(T)>
+{};
+
+template<typename T>
+struct PrimitiveConversionsTestSupport::Unsigned
+  : public UnsignedTypeOfSize<sizeof(T)>
+{};
+
+TEST(PrimitiveConversionsTest, round_trip_int) {
+  int  sfive = 5;
+  int  mfive = -5;
+  uint ufive = 5u;
+
+  typedef PrimitiveConversionsTestSupport::Signed<int>::type SI;
+  typedef PrimitiveConversionsTestSupport::Unsigned<int>::type UI;
+
+  EXPECT_EQ(sfive, PrimitiveConversions::cast<int>(PrimitiveConversions::cast<SI>(sfive)));
+  EXPECT_EQ(sfive, PrimitiveConversions::cast<int>(PrimitiveConversions::cast<UI>(sfive)));
+
+  EXPECT_EQ(mfive, PrimitiveConversions::cast<int>(PrimitiveConversions::cast<SI>(mfive)));
+  EXPECT_EQ(mfive, PrimitiveConversions::cast<int>(PrimitiveConversions::cast<UI>(mfive)));
+
+  EXPECT_EQ(ufive, PrimitiveConversions::cast<uint>(PrimitiveConversions::cast<SI>(ufive)));
+  EXPECT_EQ(ufive, PrimitiveConversions::cast<uint>(PrimitiveConversions::cast<UI>(ufive)));
+}
+
+TEST(PrimitiveConversionsTest, round_trip_float) {
+  float  ffive = 5.0f;
+  double dfive = 5.0;
+
+  typedef PrimitiveConversionsTestSupport::Signed<float>::type SF;
+  typedef PrimitiveConversionsTestSupport::Unsigned<float>::type UF;
+
+  typedef PrimitiveConversionsTestSupport::Signed<double>::type SD;
+  typedef PrimitiveConversionsTestSupport::Unsigned<double>::type UD;
+
+  EXPECT_EQ(ffive, PrimitiveConversions::cast<float>(PrimitiveConversions::cast<SF>(ffive)));
+  EXPECT_EQ(ffive, PrimitiveConversions::cast<float>(PrimitiveConversions::cast<UF>(ffive)));
+
+  EXPECT_EQ(dfive, PrimitiveConversions::cast<double>(PrimitiveConversions::cast<SD>(dfive)));
+  EXPECT_EQ(dfive, PrimitiveConversions::cast<double>(PrimitiveConversions::cast<UD>(dfive)));
+}
+
+TEST(PrimitiveConversionsTest, round_trip_ptr) {
+  int five = 5;
+  int* pfive = &five;
+  const int* cpfive = &five;
+
+  typedef PrimitiveConversionsTestSupport::Signed<int*>::type SIP;
+  typedef PrimitiveConversionsTestSupport::Unsigned<int*>::type UIP;
+
+  EXPECT_EQ(pfive, PrimitiveConversions::cast<int*>(PrimitiveConversions::cast<SIP>(pfive)));
+  EXPECT_EQ(pfive, PrimitiveConversions::cast<int*>(PrimitiveConversions::cast<UIP>(pfive)));
+
+  EXPECT_EQ(cpfive, PrimitiveConversions::cast<const int*>(PrimitiveConversions::cast<SIP>(cpfive)));
+  EXPECT_EQ(cpfive, PrimitiveConversions::cast<const int*>(PrimitiveConversions::cast<UIP>(cpfive)));
+}