8186903: Remove j-types from Atomic
author coleenp
Tue, 19 Dec 2017 06:29:17 -0500
changeset 48468 7cc7de9bf4a4
parent 48467 7969cc1b94ee
child 48469 7312ae4465d6
8186903: Remove j-types from Atomic
Summary: Make jlong into int64_t, atomic_FN_long into atomic_FN_int64, make jbyte to u_char.
Reviewed-by: dholmes, dcubed
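For context: the j-types are JNI typedefs whose width and signedness match the C++ fixed-width types exactly, so every rename in this patch is bit-for-bit neutral. A minimal illustration (the asserts below are editorial, not part of the patch):

    #include <stdint.h>
    #include <jni.h>  // defines jbyte, jint, jlong

    // Per the JNI spec, jbyte is a signed 8-bit integer, jint 32-bit,
    // jlong 64-bit, so each rename preserves size and signedness.
    static_assert(sizeof(jbyte) == sizeof(int8_t),  "jbyte  <-> int8_t");
    static_assert(sizeof(jint)  == sizeof(int32_t), "jint   <-> int32_t");
    static_assert(sizeof(jlong) == sizeof(int64_t), "jlong  <-> int64_t");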
src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
src/hotspot/cpu/zero/stubGenerator_zero.cpp
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp
src/hotspot/os_cpu/linux_sparc/os_linux_sparc.hpp
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.hpp
src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp
src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp
src/hotspot/os_cpu/solaris_x86/os_solaris_x86.hpp
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp
src/hotspot/share/runtime/atomic.hpp
src/hotspot/share/runtime/stubRoutines.cpp
src/hotspot/share/runtime/stubRoutines.hpp
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -433,7 +433,7 @@
 
 
   //----------------------------------------------------------------------------------------------------
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
+  // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
   //
   // xchg exists as far back as 8086, lock needed for MP only
   // Stack layout immediately after call:
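As a rough illustration of what the stub documented above does: x86's xchg with a memory operand is implicitly locked, so the exchange is atomic on MP systems without an explicit lock prefix. A sketch in GCC-style inline assembly (mirroring HotSpot's Linux x86 header, not the stub generator's actual output):

    // Sketch only: register/memory exchange; xchg with a memory
    // operand asserts the bus lock on its own.
    inline int32_t atomic_xchg_sketch(int32_t exchange_value,
                                      volatile int32_t* dest) {
      __asm__ volatile ("xchgl (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
      return exchange_value;  // previous contents of *dest
    }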
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -611,8 +611,8 @@
     return start;
   }
 
-  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-  //                                          jbyte compare_value)
+  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
+  //                                           int8_t compare_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -637,9 +637,9 @@
     return start;
   }
 
-  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
-  //                                          volatile jlong* dest,
-  //                                          jlong compare_value)
+  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
+  //                                            volatile int64_t* dest,
+  //                                            int64_t compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -694,8 +694,8 @@
   // Result:
   //    *dest += add_value
   //    return *dest;
-  address generate_atomic_add_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
+  address generate_atomic_add_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -5015,14 +5015,14 @@
     StubRoutines::_catch_exception_entry = generate_catch_exception();
 
     // atomic calls
-    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
-    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
-    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
-    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
-    StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
-    StubRoutines::_fence_entry               = generate_orderaccess_fence();
+    StubRoutines::_atomic_xchg_entry          = generate_atomic_xchg();
+    StubRoutines::_atomic_xchg_long_entry     = generate_atomic_xchg_long();
+    StubRoutines::_atomic_cmpxchg_entry       = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry  = generate_atomic_cmpxchg_byte();
+    StubRoutines::_atomic_cmpxchg_long_entry  = generate_atomic_cmpxchg_long();
+    StubRoutines::_atomic_add_entry           = generate_atomic_add();
+    StubRoutines::_atomic_add_long_entry      = generate_atomic_add_long();
+    StubRoutines::_fence_entry                = generate_orderaccess_fence();
 
     // platform dependent
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -258,7 +258,7 @@
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
-    StubRoutines::_atomic_add_ptr_entry      = ShouldNotCallThisStub();
+    StubRoutines::_atomic_add_long_entry     = ShouldNotCallThisStub();
     StubRoutines::_fence_entry               = ShouldNotCallThisStub();
   }
 
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -132,8 +132,8 @@
 
 extern "C" {
   // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
+  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }
 
 template<>
@@ -143,15 +143,15 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -160,7 +160,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // AMD64
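The specializations above funnel through cmpxchg_using_helper, the shared-code adapter that reinterprets an arbitrary 8-byte type T as the assembly helper's fixed-width argument type; that is why only the template argument and the helper's signature change here, never the caller-visible type. A sketch of the adapter's shape, assuming the 2017-era atomic.hpp (names as in shared code):

    template<typename Type, typename Fn, typename T>
    inline T cmpxchg_using_helper(Fn fn,
                                  T exchange_value,
                                  T volatile* dest,
                                  T compare_value) {
      STATIC_ASSERT(sizeof(Type) == sizeof(T));
      // Reinterpret T as the helper's fixed-width type, call the
      // helper, then convert the result back to T.
      return PrimitiveConversions::cast<T>(
        fn(PrimitiveConversions::cast<Type>(exchange_value),
           reinterpret_cast<Type volatile*>(dest),
           PrimitiveConversions::cast<Type>(compare_value)));
    }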
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Tue Dec 19 06:29:17 2017 -0500
@@ -633,10 +633,10 @@
         ret
 
 
-        # Support for jlong Atomic::cmpxchg(jlong exchange_value,
-        #                                   volatile jlong* dest,
-        #                                   jlong compare_value,
-        #                                   bool is_MP)
+        # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+        #                                     volatile int64_t* dest,
+        #                                     int64_t compare_value,
+        #                                     bool is_MP)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -658,8 +658,8 @@
         ret
 
 
-        # Support for jlong Atomic::load and Atomic::store.
-        # void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
+        # Support for int64_t Atomic::load and Atomic::store.
+        # void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst)
         .p2align 4,,15
         ELF_TYPE(_Atomic_move_long,@function)
 SYMBOL(_Atomic_move_long):
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -265,8 +265,8 @@
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -275,7 +275,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
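On 32-bit Zero targets a plain 64-bit load or store need not be single-copy atomic, so both directions funnel through os::atomic_copy64. A self-contained sketch of the contract using GCC atomic builtins (the real per-CPU implementations may use FP registers, kernel helpers, or locking instead):

    #include <stdint.h>

    // Sketch only: copy 64 bits atomically where the compiler can
    // provide it; HotSpot's os::atomic_copy64 is hand-written per CPU.
    inline void atomic_copy64_sketch(const volatile int64_t* src,
                                     volatile int64_t* dst) {
      int64_t tmp = __atomic_load_n(src, __ATOMIC_RELAXED);
      __atomic_store_n(dst, tmp, __ATOMIC_RELAXED);
    }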
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -50,7 +50,7 @@
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
+    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
 }
 
 template<>
@@ -59,7 +59,7 @@
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
-    PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
+    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 #endif
 
@@ -103,7 +103,7 @@
     : "memory");
   return val;
 #else
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 #endif
 }
 
@@ -146,7 +146,7 @@
     : "memory");
   return old_val;
 #else
-  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
 #endif
 }
 
@@ -178,17 +178,17 @@
 
 #ifndef AARCH64
 
-inline jint reorder_cmpxchg_func(jint exchange_value,
-                                 jint volatile* dest,
-                                 jint compare_value) {
+inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
+                                    int32_t volatile* dest,
+                                    int32_t compare_value) {
   // Warning:  Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 }
 
-inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
-                                       jlong volatile* dest,
-                                       jlong compare_value) {
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
+                                         int64_t volatile* dest,
+                                         int64_t compare_value) {
+  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
   // Warning:  Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 }
@@ -221,7 +221,7 @@
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 #endif
 }
 
@@ -251,7 +251,7 @@
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 #endif
 }
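Note that the caller-facing Atomic API is unchanged by this patch; only the plumbing underneath the templates now names fixed-width types. A hypothetical caller (names invented) compiles identically before and after:

    volatile int64_t _counter = 0;  // hypothetical field

    void bump_if_zero() {
      // Template deduction picks PlatformCmpxchg<8>, which now lands in
      // reorder_cmpxchg_long_func with int64_t rather than jlong arguments.
      int64_t old = Atomic::cmpxchg((int64_t)42, &_counter, (int64_t)0);
      (void)old;
    }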
 
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -598,11 +598,11 @@
 
 #ifndef AARCH64
 
-typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*);
+typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -612,16 +612,16 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
-typedef jlong load_long_func_t(const volatile jlong*);
+typedef int64_t load_long_func_t(const volatile int64_t*);
 
 load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
 
-jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
+int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
   // try to use the stub:
   load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
 
@@ -631,15 +631,15 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *src;
+  int64_t old_value = *src;
   return old_value;
 }
 
-typedef void store_long_func_t(jlong, volatile jlong*);
+typedef void store_long_func_t(int64_t, volatile int64_t*);
 
 store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
 
-void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
+void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
   // try to use the stub:
   store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
 
@@ -652,11 +652,11 @@
   *dest = val;
 }
 
-typedef jint  atomic_add_func_t(jint add_value, volatile jint *dest);
+typedef int32_t  atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
 
 atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
 
-jint  os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
+int32_t  os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
   atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
                                             StubRoutines::atomic_add_entry());
   if (func != NULL) {
@@ -664,16 +664,16 @@
     return (*func)(add_value, dest);
   }
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = old_value + add_value;
   return (old_value + add_value);
 }
 
-typedef jint  atomic_xchg_func_t(jint exchange_value, volatile jint *dest);
+typedef int32_t  atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
 
 atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
 
-jint  os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
+int32_t  os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
   atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
                                             StubRoutines::atomic_xchg_entry());
   if (func != NULL) {
@@ -681,16 +681,16 @@
     return (*func)(exchange_value, dest);
   }
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return (old_value);
 }
 
-typedef jint cmpxchg_func_t(jint, jint, volatile jint*);
+typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
 
 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
 
-jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -700,7 +700,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
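Every *_bootstrap function in this file follows the same self-installing pattern: the os:: function pointer initially targets the C++ bootstrap, and the first call made after stub generation replaces it with the generated stub, so later calls bypass the bootstrap entirely. A generic sketch of the pattern (all names illustrative, not HotSpot's):

    #include <stdint.h>
    #include <stddef.h>

    typedef int32_t add_fn_t(int32_t, volatile int32_t*);

    static int32_t add_bootstrap(int32_t v, volatile int32_t* dest);
    static add_fn_t* add_impl = add_bootstrap;  // self-installing pointer

    static add_fn_t* lookup_generated_stub() {  // stand-in for StubRoutines lookup
      return NULL;                              // NULL until stubs exist
    }

    static int32_t add_bootstrap(int32_t v, volatile int32_t* dest) {
      add_fn_t* stub = lookup_generated_stub();
      if (stub != NULL) {
        add_impl = stub;                        // install; later calls skip this path
        return (*stub)(v, dest);
      }
      // Single-threaded startup fallback, mirroring the asserts above.
      int32_t nv = *dest + v;
      *dest = nv;
      return nv;
    }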
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -45,35 +45,35 @@
   static bool register_code_area(char *low, char *high) { return true; }
 
 #ifndef AARCH64
-  static jlong (*atomic_cmpxchg_long_func)(jlong compare_value,
-                                           jlong exchange_value,
-                                           volatile jlong *dest);
+  static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
+                                             int64_t exchange_value,
+                                             volatile int64_t *dest);
 
-  static jlong (*atomic_load_long_func)(const volatile jlong*);
+  static int64_t (*atomic_load_long_func)(const volatile int64_t*);
 
-  static void (*atomic_store_long_func)(jlong, volatile jlong*);
+  static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
 
-  static jint  (*atomic_add_func)(jint add_value, volatile jint *dest);
+  static int32_t  (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
 
-  static jint  (*atomic_xchg_func)(jint exchange_value, volatile jint *dest);
+  static int32_t  (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
 
-  static jint  (*atomic_cmpxchg_func)(jint compare_value,
-                                      jint exchange_value,
-                                      volatile jint *dest);
+  static int32_t  (*atomic_cmpxchg_func)(int32_t compare_value,
+                                         int32_t exchange_value,
+                                         volatile int32_t *dest);
 
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*);
+  static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
 
-  static jlong atomic_load_long_bootstrap(const volatile jlong*);
+  static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
 
-  static void atomic_store_long_bootstrap(jlong, volatile jlong*);
+  static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
 
-  static jint  atomic_add_bootstrap(jint add_value, volatile jint *dest);
+  static int32_t  atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
 
-  static jint  atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest);
+  static int32_t  atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
 
-  static jint  atomic_cmpxchg_bootstrap(jint compare_value,
-                                        jint exchange_value,
-                                        volatile jint *dest);
+  static int32_t  atomic_cmpxchg_bootstrap(int32_t compare_value,
+                                           int32_t exchange_value,
+                                           volatile int32_t *dest);
 #endif // !AARCH64
 
 #endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,15 @@
   //
   // NOTE: we are back in class os here, not Linux
   //
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu() {}
 
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -133,8 +133,8 @@
 
 extern "C" {
   // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
-  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
+  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }
 
 template<>
@@ -144,15 +144,15 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -161,7 +161,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // AMD64
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -185,8 +185,8 @@
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }
 
@@ -195,7 +195,7 @@
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
--- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,15 @@
   //
   // NOTE: we are back in class os here, not Solaris
   //
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*,  int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*,  int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*,  int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*,  int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu() {}
 
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -28,16 +28,16 @@
 // For Sun Studio - implementation is in solaris_x86_64.il.
 
 extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest);
-  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+  int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest);
+  int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest);
 
-  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value);
-  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value);
-  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value);
+  int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest);
+  int8_t  _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest,
+                               int8_t compare_value);
+  int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
+                          int32_t compare_value);
+  int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest,
+                               int64_t compare_value);
 }
 
 template<size_t byte_size>
@@ -55,8 +55,8 @@
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
-                reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_add(PrimitiveConversions::cast<int32_t>(add_value),
+                reinterpret_cast<int32_t volatile*>(dest)));
 }
 
 // Not using add_using_helper; see comment for cmpxchg.
@@ -66,8 +66,8 @@
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
-                     reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_add_long(PrimitiveConversions::cast<int64_t>(add_value),
+                     reinterpret_cast<int64_t volatile*>(dest)));
 }
 
 template<>
@@ -76,11 +76,11 @@
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
-                 reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                 reinterpret_cast<int32_t volatile*>(dest)));
 }
 
-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest);
 
 template<>
 template<typename T>
@@ -88,8 +88,8 @@
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                      reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                      reinterpret_cast<int64_t volatile*>(dest)));
 }
 
 // Not using cmpxchg_using_helper here, because some configurations of
@@ -106,9 +106,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value),
-                         reinterpret_cast<jbyte volatile*>(dest),
-                         PrimitiveConversions::cast<jbyte>(compare_value)));
+    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
+                         reinterpret_cast<int8_t volatile*>(dest),
+                         PrimitiveConversions::cast<int8_t>(compare_value)));
 }
 
 template<>
@@ -119,9 +119,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value),
-                    reinterpret_cast<jint volatile*>(dest),
-                    PrimitiveConversions::cast<jint>(compare_value)));
+    _Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                    reinterpret_cast<int32_t volatile*>(dest),
+                    PrimitiveConversions::cast<int32_t>(compare_value)));
 }
 
 template<>
@@ -132,9 +132,9 @@
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                         reinterpret_cast<jlong volatile*>(dest),
-                         PrimitiveConversions::cast<jlong>(compare_value)));
+    _Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                         reinterpret_cast<int64_t volatile*>(dest),
+                         PrimitiveConversions::cast<int64_t>(compare_value)));
 }
 
 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -904,12 +904,12 @@
 // until initialization is complete.
 // TODO - replace with .il implementation when compiler supports it.
 
-typedef jint  xchg_func_t        (jint,  volatile jint*);
-typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
-typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
-typedef jint  add_func_t         (jint,  volatile jint*);
+typedef int32_t  xchg_func_t        (int32_t,  volatile int32_t*);
+typedef int32_t  cmpxchg_func_t     (int32_t,  volatile int32_t*,  int32_t);
+typedef int64_t  cmpxchg_long_func_t(int64_t,  volatile int64_t*,  int64_t);
+typedef int32_t  add_func_t         (int32_t,  volatile int32_t*);
 
-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 
@@ -919,12 +919,12 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -934,13 +934,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -950,13 +950,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
 
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,15 +31,15 @@
 #ifdef AMD64
   static void setup_fpu() {}
 #else
-  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static int32_t  (*atomic_xchg_func)        (int32_t,  volatile int32_t*);
+  static int32_t  (*atomic_cmpxchg_func)     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  (*atomic_cmpxchg_long_func)(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  (*atomic_add_func)         (int32_t,  volatile int32_t*);
 
-  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int64_t  atomic_cmpxchg_long_bootstrap(int64_t,  volatile int64_t*, int64_t);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
 
   static void setup_fpu();
 #endif // AMD64
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -54,13 +54,13 @@
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 }
 
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
@@ -72,8 +72,8 @@
     return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
   }
 
-DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
+DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
+DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
 
@@ -88,9 +88,9 @@
     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
   }
 
-DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
-DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
-DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
+DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
+DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
+DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
 
 #undef DEFINE_STUB_CMPXCHG
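For reference, each DEFINE_STUB_CMPXCHG invocation above expands to a PlatformCmpxchg specialization whose body is the cmpxchg_using_helper call visible in the macro tail; approximately (the diff elides the macro's middle lines), the byte variant becomes:

    template<>
    template<typename T>
    inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                    T volatile* dest,
                                                    T compare_value,
                                                    cmpxchg_memory_order order) const {
      STATIC_ASSERT(1 == sizeof(T));
      return cmpxchg_using_helper<int8_t>(os::atomic_cmpxchg_byte_func,
                                          exchange_value, dest, compare_value);
    }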
 
@@ -162,10 +162,10 @@
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  jint ex_lo  = (jint)exchange_value;
-  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
-  jint cmp_lo = (jint)compare_value;
-  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
+  int32_t ex_lo  = (int32_t)exchange_value;
+  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
+  int32_t cmp_lo = (int32_t)compare_value;
+  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
   __asm {
     push ebx
     push edi
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -218,17 +218,17 @@
 
 // Atomics and Stub Functions
 
-typedef jint      xchg_func_t            (jint,     volatile jint*);
-typedef intptr_t  xchg_long_func_t       (jlong,    volatile jlong*);
-typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
-typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
-typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
-typedef jint      add_func_t             (jint,     volatile jint*);
-typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
+typedef int32_t   xchg_func_t            (int32_t,  volatile int32_t*);
+typedef int64_t   xchg_long_func_t       (int64_t,  volatile int64_t*);
+typedef int32_t   cmpxchg_func_t         (int32_t,  volatile int32_t*, int32_t);
+typedef int8_t    cmpxchg_byte_func_t    (int8_t,   volatile int8_t*,  int8_t);
+typedef int64_t   cmpxchg_long_func_t    (int64_t,  volatile int64_t*, int64_t);
+typedef int32_t   add_func_t             (int32_t,  volatile int32_t*);
+typedef int64_t   add_long_func_t        (int64_t,  volatile int64_t*);
 
 #ifdef AMD64
 
-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
 
@@ -238,12 +238,12 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
-intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 
@@ -253,13 +253,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  intptr_t old_value = *dest;
+  int64_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }
 
 
-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
 
@@ -269,13 +269,13 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
 
-jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
   // try to use the stub:
   cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
 
@@ -285,7 +285,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jbyte old_value = *dest;
+  int8_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
@@ -293,7 +293,7 @@
 
 #endif // AMD64
 
-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
 
@@ -303,7 +303,7 @@
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
 
-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
@@ -311,7 +311,7 @@
 
 #ifdef AMD64
 
-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
 
@@ -324,12 +324,12 @@
   return (*dest) += add_value;
 }
 
-intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
+int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
   // try to use the stub:
-  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
+  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
 
   if (func != NULL) {
-    os::atomic_add_ptr_func = func;
+    os::atomic_add_long_func = func;
     return (*func)(add_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -342,7 +342,7 @@
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
+add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;
 
 #endif // AMD64
 
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -29,32 +29,32 @@
   // NOTE: we are back in class os here, not win32
   //
 #ifdef AMD64
-  static jint      (*atomic_xchg_func)          (jint,      volatile jint*);
-  static intptr_t  (*atomic_xchg_long_func)     (jlong,     volatile jlong*);
+  static int32_t   (*atomic_xchg_func)          (int32_t, volatile int32_t*);
+  static int64_t   (*atomic_xchg_long_func)     (int64_t, volatile int64_t*);
 
-  static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
-  static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
-  static jlong     (*atomic_cmpxchg_long_func)  (jlong,     volatile jlong*, jlong);
+  static int32_t   (*atomic_cmpxchg_func)       (int32_t,  volatile int32_t*, int32_t);
+  static int8_t    (*atomic_cmpxchg_byte_func)  (int8_t,   volatile int8_t*,  int8_t);
+  static int64_t   (*atomic_cmpxchg_long_func)  (int64_t,  volatile int64_t*, int64_t);
 
-  static jint      (*atomic_add_func)           (jint,      volatile jint*);
-  static intptr_t  (*atomic_add_ptr_func)       (intptr_t,  volatile intptr_t*);
+  static int32_t   (*atomic_add_func)           (int32_t,  volatile int32_t*);
+  static int64_t   (*atomic_add_long_func)      (int64_t,  volatile int64_t*);
 
-  static jint      atomic_xchg_bootstrap        (jint,      volatile jint*);
-  static intptr_t  atomic_xchg_long_bootstrap   (jlong,     volatile jlong*);
+  static int32_t   atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
+  static int64_t   atomic_xchg_long_bootstrap   (int64_t,  volatile int64_t*);
 
-  static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
-  static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
+  static int32_t   atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
+  static int8_t    atomic_cmpxchg_byte_bootstrap(int8_t,   volatile int8_t*,  int8_t);
 #else
 
-  static jlong (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
+  static int64_t (*atomic_cmpxchg_long_func)  (int64_t, volatile int64_t*, int64_t);
 
 #endif // AMD64
 
-  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
+  static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
 
 #ifdef AMD64
-  static jint      atomic_add_bootstrap         (jint,      volatile jint*);
-  static intptr_t  atomic_add_ptr_bootstrap     (intptr_t,  volatile intptr_t*);
+  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
+  static int64_t  atomic_add_long_bootstrap    (int64_t,  volatile int64_t*);
 #endif // AMD64
 
   static void setup_fpu();
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -45,8 +45,8 @@
 
 class Atomic : AllStatic {
 public:
-  // Atomic operations on jlong types are not available on all 32-bit
-  // platforms. If atomic ops on jlongs are defined here they must only
+  // Atomic operations on int64 types are not available on all 32-bit
+  // platforms. If atomic ops on int64 are defined here they must only
   // be used from code that verifies they are available at runtime and
   // can provide an alternative action if not - see supports_cx8() for
   // a means to test availability.
@@ -639,16 +639,16 @@
 //
 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 template<>
-struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
-  jshort operator()(jshort add_value, jshort volatile* dest) const {
+struct Atomic::AddImpl<short, short> VALUE_OBJ_CLASS_SPEC {
+  short operator()(short add_value, short volatile* dest) const {
 #ifdef VM_LITTLE_ENDIAN
     assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
 #else
     assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
 #endif
-    return (jshort)(new_value >> 16); // preserves sign
+    return (short)(new_value >> 16); // preserves sign
   }
 };
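The specialization above makes a 16-bit atomic add work by widening it to an aligned 32-bit add: the short being updated must occupy the high-order half of a naturally aligned int (hence the endian-dependent alignment asserts), the addend is shifted into that half, and the partner short in the low-order half is untouched because only zero is added to it and carries propagate out of, never into, the low half. The ATOMIC_SHORT_PAIR macro referenced in the comment exists to declare such a pair with the right layout; a hypothetical use (field names invented, and assuming the macro takes the atomic declaration first):

    // Hypothetical pair: _counter is updated with Atomic::add, _flags is not.
    // ATOMIC_SHORT_PAIR orders the declarations so that, on either endianness,
    // _counter lands in the high-order half of an aligned 32-bit slot.
    ATOMIC_SHORT_PAIR(
      volatile short _counter,  // needs atomic update
      short _flags              // updated only under a lock
    );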
 
@@ -807,7 +807,7 @@
   do {
     // value to swap in matches current value ...
     uint32_t new_value = cur;
-    // ... except for the one jbyte we want to update
+    // ... except for the one byte we want to update
     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
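This hunk sits inside the shared fallback that emulates a one-byte cmpxchg on top of the four-byte primitive, which is what platforms without a byte stub rely on. A condensed standalone sketch of the whole algorithm (cmpxchg32 stands in for the platform's 4-byte compare-and-swap):

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t cmpxchg32(uint32_t xchg, volatile uint32_t* dest,
                              uint32_t cmp) {
      return __sync_val_compare_and_swap(dest, cmp, xchg);
    }

    uint8_t cmpxchg_byte(uint8_t exchange_value, volatile uint8_t* dest,
                         uint8_t compare_value) {
      // Find the aligned 32-bit word containing *dest and the byte's offset.
      uintptr_t addr = (uintptr_t)dest;
      volatile uint32_t* aligned = (volatile uint32_t*)(addr & ~(uintptr_t)3);
      size_t offset = addr & 3;            // byte index within the word

      uint32_t cur = *aligned;
      uint8_t cur_byte = ((uint8_t*)&cur)[offset];
      while (cur_byte == compare_value) {
        // Value to swap in matches the current word ...
        uint32_t new_word = cur;
        // ... except for the one byte we want to update.
        ((uint8_t*)&new_word)[offset] = exchange_value;
        uint32_t res = cmpxchg32(new_word, aligned, cur);
        if (res == cur) break;             // success
        cur = res;                         // another byte changed; re-check ours
        cur_byte = ((uint8_t*)&cur)[offset];
      }
      return cur_byte;                     // old value of the byte
    }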
--- a/src/hotspot/share/runtime/stubRoutines.cpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/share/runtime/stubRoutines.cpp	Tue Dec 19 06:29:17 2017 -0500
@@ -62,12 +62,11 @@
 address StubRoutines::_atomic_xchg_entry                        = NULL;
 address StubRoutines::_atomic_xchg_long_entry                   = NULL;
 address StubRoutines::_atomic_store_entry                       = NULL;
-address StubRoutines::_atomic_store_ptr_entry                   = NULL;
 address StubRoutines::_atomic_cmpxchg_entry                     = NULL;
 address StubRoutines::_atomic_cmpxchg_byte_entry                = NULL;
 address StubRoutines::_atomic_cmpxchg_long_entry                = NULL;
 address StubRoutines::_atomic_add_entry                         = NULL;
-address StubRoutines::_atomic_add_ptr_entry                     = NULL;
+address StubRoutines::_atomic_add_long_entry                    = NULL;
 address StubRoutines::_fence_entry                              = NULL;
 address StubRoutines::_d2i_wrapper                              = NULL;
 address StubRoutines::_d2l_wrapper                              = NULL;
--- a/src/hotspot/share/runtime/stubRoutines.hpp	Mon Dec 18 12:11:01 2017 +0100
+++ b/src/hotspot/share/runtime/stubRoutines.hpp	Tue Dec 19 06:29:17 2017 -0500
@@ -103,12 +103,11 @@
   static address _atomic_xchg_entry;
   static address _atomic_xchg_long_entry;
   static address _atomic_store_entry;
-  static address _atomic_store_ptr_entry;
   static address _atomic_cmpxchg_entry;
   static address _atomic_cmpxchg_byte_entry;
   static address _atomic_cmpxchg_long_entry;
   static address _atomic_add_entry;
-  static address _atomic_add_ptr_entry;
+  static address _atomic_add_long_entry;
   static address _fence_entry;
   static address _d2i_wrapper;
   static address _d2l_wrapper;
@@ -277,12 +276,11 @@
   static address atomic_xchg_entry()                       { return _atomic_xchg_entry; }
   static address atomic_xchg_long_entry()                  { return _atomic_xchg_long_entry; }
   static address atomic_store_entry()                      { return _atomic_store_entry; }
-  static address atomic_store_ptr_entry()                  { return _atomic_store_ptr_entry; }
   static address atomic_cmpxchg_entry()                    { return _atomic_cmpxchg_entry; }
   static address atomic_cmpxchg_byte_entry()               { return _atomic_cmpxchg_byte_entry; }
   static address atomic_cmpxchg_long_entry()               { return _atomic_cmpxchg_long_entry; }
   static address atomic_add_entry()                        { return _atomic_add_entry; }
-  static address atomic_add_ptr_entry()                    { return _atomic_add_ptr_entry; }
+  static address atomic_add_long_entry()                   { return _atomic_add_long_entry; }
   static address fence_entry()                             { return _fence_entry; }
 
   static address d2i_wrapper()                             { return _d2i_wrapper; }