8234562: Move OrderAccess::release_store*/load_acquire to Atomic
author stefank
Mon, 25 Nov 2019 12:22:13 +0100
changeset 59247 56bf71d64d51
parent 59246 fcad92f425c5
child 59248 e92153ed8bdc
8234562: Move OrderAccess::release_store*/load_acquire to Atomic
Reviewed-by: rehn, dholmes
src/hotspot/cpu/ppc/nativeInst_ppc.cpp
src/hotspot/os/bsd/os_bsd.cpp
src/hotspot/os/windows/os_windows.cpp
src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp
src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp
src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp
src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
src/hotspot/share/classfile/classLoader.inline.hpp
src/hotspot/share/classfile/classLoaderData.cpp
src/hotspot/share/classfile/classLoaderDataGraph.cpp
src/hotspot/share/code/compiledMethod.cpp
src/hotspot/share/code/compiledMethod.inline.hpp
src/hotspot/share/code/dependencyContext.cpp
src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp
src/hotspot/share/gc/g1/heapRegionRemSet.cpp
src/hotspot/share/gc/g1/heapRegionRemSet.hpp
src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp
src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
src/hotspot/share/gc/shared/concurrentGCThread.cpp
src/hotspot/share/gc/shared/oopStorage.cpp
src/hotspot/share/gc/shared/ptrQueue.cpp
src/hotspot/share/gc/shared/taskqueue.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
src/hotspot/share/gc/z/zLiveMap.cpp
src/hotspot/share/gc/z/zLiveMap.inline.hpp
src/hotspot/share/gc/z/zNMethodData.cpp
src/hotspot/share/gc/z/zObjectAllocator.cpp
src/hotspot/share/interpreter/oopMapCache.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp
src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp
src/hotspot/share/jfr/utilities/jfrHashtable.hpp
src/hotspot/share/logging/logDecorations.cpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/oops/accessBackend.inline.hpp
src/hotspot/share/oops/array.hpp
src/hotspot/share/oops/arrayKlass.inline.hpp
src/hotspot/share/oops/constantPool.cpp
src/hotspot/share/oops/constantPool.inline.hpp
src/hotspot/share/oops/cpCache.cpp
src/hotspot/share/oops/cpCache.inline.hpp
src/hotspot/share/oops/instanceKlass.cpp
src/hotspot/share/oops/instanceKlass.inline.hpp
src/hotspot/share/oops/klass.cpp
src/hotspot/share/oops/method.cpp
src/hotspot/share/oops/method.inline.hpp
src/hotspot/share/oops/methodData.cpp
src/hotspot/share/oops/methodData.inline.hpp
src/hotspot/share/oops/oop.inline.hpp
src/hotspot/share/prims/jni.cpp
src/hotspot/share/prims/jvm.cpp
src/hotspot/share/prims/jvmtiEnvBase.hpp
src/hotspot/share/prims/jvmtiRawMonitor.cpp
src/hotspot/share/runtime/atomic.hpp
src/hotspot/share/runtime/handshake.cpp
src/hotspot/share/runtime/init.cpp
src/hotspot/share/runtime/interfaceSupport.cpp
src/hotspot/share/runtime/objectMonitor.cpp
src/hotspot/share/runtime/orderAccess.hpp
src/hotspot/share/runtime/perfMemory.cpp
src/hotspot/share/runtime/safepoint.cpp
src/hotspot/share/runtime/synchronizer.cpp
src/hotspot/share/runtime/thread.cpp
src/hotspot/share/runtime/thread.inline.hpp
src/hotspot/share/runtime/threadHeapSampler.cpp
src/hotspot/share/runtime/threadSMR.cpp
src/hotspot/share/runtime/threadSMR.inline.hpp
src/hotspot/share/runtime/vmThread.cpp
src/hotspot/share/services/memoryManager.cpp
src/hotspot/share/services/memoryPool.cpp
src/hotspot/share/utilities/bitMap.inline.hpp
src/hotspot/share/utilities/concurrentHashTable.inline.hpp
src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
src/hotspot/share/utilities/globalCounter.cpp
src/hotspot/share/utilities/globalCounter.inline.hpp
src/hotspot/share/utilities/hashtable.inline.hpp
src/hotspot/share/utilities/singleWriterSynchronizer.cpp
test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp
test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp
test/hotspot/gtest/utilities/test_globalCounter.cpp
test/hotspot/gtest/utilities/test_globalCounter_nested.cpp
test/hotspot/gtest/utilities/test_lockFreeStack.cpp
test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp
test/hotspot/gtest/utilities/test_waitBarrier.cpp
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -374,7 +374,7 @@
   // Finally patch out the jump.
   volatile juint *jump_addr = (volatile juint*)instr_addr;
   // Release not needed because caller uses invalidate_range after copying the remaining bytes.
-  //OrderAccess::release_store(jump_addr, *((juint*)code_buffer));
+  //Atomic::release_store(jump_addr, *((juint*)code_buffer));
   *jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
   ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
 }
--- a/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -51,7 +51,6 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/semaphore.hpp"
@@ -3209,7 +3208,7 @@
 static volatile int next_processor_id = 0;
 
 static inline volatile int* get_apic_to_processor_mapping() {
-  volatile int* mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+  volatile int* mapping = Atomic::load_acquire(&apic_to_processor_mapping);
   if (mapping == NULL) {
     // Calculate possible number space for APIC ids. This space is not necessarily
     // in the range [0, number_of_processors).
@@ -3240,7 +3239,7 @@
 
     if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
       FREE_C_HEAP_ARRAY(int, mapping);
-      mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+      mapping = Atomic::load_acquire(&apic_to_processor_mapping);
     }
   }
 
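
The hunk above is HotSpot's standard one-shot publication race: check with a load_acquire, publish with a CAS (replace_if_null), and on failure re-read the winner's value with another load_acquire. A minimal standalone sketch of the same idiom in portable C++ (all names here are illustrative, not from the patch):

#include <atomic>
#include <cstddef>

static std::atomic<int*> g_table{nullptr};

int* get_table(std::size_t size) {
  // Acquire pairs with the release CAS below: a non-null result
  // implies the array contents are fully initialized.
  int* t = g_table.load(std::memory_order_acquire);
  if (t == nullptr) {
    int* fresh = new int[size]();   // zero-initialized
    int* expected = nullptr;
    if (g_table.compare_exchange_strong(expected, fresh,
                                        std::memory_order_release,
                                        std::memory_order_acquire)) {
      t = fresh;                    // we published our copy
    } else {
      delete[] fresh;               // lost the race; free ours
      t = expected;                 // use the winner's table
    }
  }
  return t;
}
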
--- a/src/hotspot/os/windows/os_windows.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os/windows/os_windows.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -3747,7 +3747,7 @@
     // The first thread that reached this point, initializes the critical section.
     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
-    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
+    } else if (Atomic::load_acquire(&process_exiting) == 0) {
       if (what != EPT_THREAD) {
         // Atomically set process_exiting before the critical section
         // to increase the visibility between racing threads.
@@ -3755,7 +3755,7 @@
       }
       EnterCriticalSection(&crit_sect);
 
-      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
+      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
         // Remove from the array those handles of the threads that have completed exiting.
         for (i = 0, j = 0; i < handle_count; ++i) {
           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@@ -3868,7 +3868,7 @@
     }
 
     if (!registered &&
-        OrderAccess::load_acquire(&process_exiting) != 0 &&
+        Atomic::load_acquire(&process_exiting) != 0 &&
         process_exiting != GetCurrentThreadId()) {
       // Some other thread is about to call exit(), so we don't let
       // the current unregistered thread proceed to exit() or _endthreadex()
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -30,6 +30,7 @@
 #error "Atomic currently only implemented for PPC64"
 #endif
 
+#include "orderAccess_aix_ppc.hpp"
 #include "utilities/debug.hpp"
 
 // Implementation of class atomic
@@ -399,4 +400,15 @@
   return old_value;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    T t = Atomic::load(p);
+    // Use twi-isync for load_acquire (faster than lwsync).
+    __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+    return t;
+  }
+};
+
 #endif // OS_CPU_AIX_PPC_ATOMIC_AIX_PPC_HPP
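
With this move, a platform only specializes Atomic::PlatformOrderedLoad and the generic Atomic::load_acquire front end selects it by operand size. A standalone sketch of that functor-dispatch shape (names illustrative; the real atomic.hpp adds type canonicalization on top):

#include <cstddef>

// Generic fallback: plain load followed by an acquire fence.
// A platform would specialize OrderedLoad<8> (etc.) with a cheaper
// sequence, like the twi/isync pair in the PPC code above.
template<std::size_t byte_size>
struct OrderedLoad {
  template<typename T>
  T operator()(const volatile T* p) const {
    T v = *p;
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return v;
  }
};

template<typename T>
T load_acquire(const volatile T* p) {
  return OrderedLoad<sizeof(T)>()(p);  // size-keyed dispatch
}
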
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -64,8 +64,6 @@
 #define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
 
 inline void OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void OrderAccess::storestore() { inlasm_lwsync(); }
@@ -78,13 +76,6 @@
 inline void OrderAccess::cross_modify_fence()
                                       { inlasm_isync();  }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -169,4 +169,54 @@
 
 #endif // AMD64
 
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+#ifdef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+#endif // AMD64
+
 #endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
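
These specializations implement release_store_fence with a single xchg: an exchange with a memory operand is implicitly locked on x86, so one instruction provides both the store and the trailing full fence, avoiding a separate mfence. A standalone equivalent (illustrative; GCC/Clang syntax):

#include <atomic>
#include <cstdint>

// xchg with a memory operand is implicitly locked on x86, so the
// store doubles as a full memory fence.
inline void release_store_fence_u32(volatile uint32_t* p, uint32_t v) {
  __asm__ volatile("xchgl %0, %1"
                   : "+r"(v), "+m"(*p)
                   :
                   : "memory");
}

// Portable spelling: a seq_cst store typically compiles to the same
// xchg on x86-64.
inline void release_store_fence_portable(std::atomic<uint32_t>& p,
                                         uint32_t v) {
  p.store(v, std::memory_order_seq_cst);
}
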
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -64,54 +64,4 @@
   __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
 }
 
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgb (%2),%0"
-                      : "=q" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgw (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgl (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgq (%2), %0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-#endif // AMD64
-
 #endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -32,10 +32,6 @@
 // Note that memory_order_conservative requires a full barrier after atomic stores.
 // See https://patchwork.kernel.org/patch/3575821/
 
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -81,4 +77,25 @@
   }
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+};
+
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
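
On AArch64 the moved code leans on the GCC/Clang __atomic builtins, which compile the acquire load to ldar and the release store to stlr, so no separate fence instruction is needed. A standalone sketch (illustrative names):

#include <cstdint>

inline uint64_t load_acquire_u64(const volatile uint64_t* p) {
  uint64_t v;
  // Compiles to ldar on AArch64.
  __atomic_load(const_cast<uint64_t*>(p), &v, __ATOMIC_ACQUIRE);
  return v;
}

inline void release_store_u64(volatile uint64_t* p, uint64_t v) {
  // Compiles to stlr on AArch64.
  __atomic_store(const_cast<uint64_t*>(p), &v, __ATOMIC_RELEASE);
}
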
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -37,6 +37,10 @@
 inline void OrderAccess::loadstore()  { acquire(); }
 inline void OrderAccess::storeload()  { fence(); }
 
+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
 inline void OrderAccess::acquire() {
   READ_MEM_BARRIER;
 }
@@ -51,25 +55,4 @@
 
 inline void OrderAccess::cross_modify_fence() { }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
-};
-
 #endif // OS_CPU_LINUX_AARCH64_ORDERACCESS_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -30,6 +30,7 @@
 #error "Atomic currently only implemented for PPC64"
 #endif
 
+#include "orderAccess_linux_ppc.hpp"
 #include "utilities/debug.hpp"
 
 // Implementation of class atomic
@@ -399,4 +400,16 @@
   return old_value;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    T t = Atomic::load(p);
+    // Use twi-isync for load_acquire (faster than lwsync).
+    __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+    return t;
+  }
+};
+
 #endif // OS_CPU_LINUX_PPC_ATOMIC_LINUX_PPC_HPP
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -68,8 +68,6 @@
 #define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
 
 inline void   OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void   OrderAccess::storestore() { inlasm_lwsync(); }
@@ -82,17 +80,9 @@
 inline void   OrderAccess::cross_modify_fence()
                                         { inlasm_isync();  }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
-#undef inlasm_acquire_reg
 
 #endif // OS_CPU_LINUX_PPC_ORDERACCESS_LINUX_PPC_HPP
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -335,4 +335,11 @@
   return old;
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }
+};
+
 #endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -76,13 +76,6 @@
 inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
 inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
 
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T t = *p; inlasm_zarch_acquire(); return t; }
-};
-
 #undef inlasm_compiler_barrier
 #undef inlasm_zarch_sync
 #undef inlasm_zarch_release
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -169,4 +169,54 @@
 
 #endif // AMD64
 
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+#ifdef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+#endif // AMD64
+
 #endif // OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -66,54 +66,4 @@
 #endif
 }
 
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgb (%2),%0"
-                      : "=q" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgw (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgl (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgq (%2), %0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-#endif // AMD64
-
 #endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -27,6 +27,17 @@
 
 #include "runtime/os.hpp"
 
+// Note that in MSVC, volatile memory accesses are explicitly
+// guaranteed to have acquire release semantics (w.r.t. compiler
+// reordering) and therefore do not even need a compiler barrier
+// for normal acquire release accesses. And all generalized
+// bound calls like release_store go through Atomic::load
+// and Atomic::store which do volatile memory accesses.
+template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
+template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
 // The following alternative implementations are needed because
 // Windows 95 doesn't support (some of) the corresponding Windows NT
 // calls. Furthermore, these versions allow inlining in the caller.
@@ -218,4 +229,45 @@
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
 
+#ifndef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov al, v;
+      xchg al, byte ptr [edx];
+    }
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov ax, v;
+      xchg ax, word ptr [edx];
+    }
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov eax, v;
+      xchg eax, dword ptr [edx];
+    }
+  }
+};
+#endif // AMD64
+
 #endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
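
The empty ScopedFence prefix/postfix specializations moved in above encode MSVC's guarantee that volatile accesses are already compiler-ordered, so only release_store_fence needs a real (full) fence. A simplified standalone sketch of the ScopedFence idiom (the HotSpot class also records the accessed address; names here are illustrative):

#include <atomic>

enum ScopedFenceType { X_ACQUIRE, RELEASE_X, RELEASE_X_FENCE };

template<ScopedFenceType T>
struct ScopedFence {
  ScopedFence()  { prefix(); }    // runs before the access
  ~ScopedFence() { postfix(); }   // runs after the access
  static void prefix()  {}
  static void postfix() {}
};

// Only the "store + fence" flavor does real work on this platform:
template<>
inline void ScopedFence<RELEASE_X_FENCE>::postfix() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void release_store_fence(volatile int* p, int v) {
  ScopedFence<RELEASE_X_FENCE> f;
  *p = v;   // MSVC: the volatile store is compiler-ordered already
}
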
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -39,17 +39,6 @@
   _ReadWriteBarrier();
 }
 
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore does not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
-template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
 inline void OrderAccess::loadload()   { compiler_barrier(); }
 inline void OrderAccess::storestore() { compiler_barrier(); }
 inline void OrderAccess::loadstore()  { compiler_barrier(); }
@@ -74,45 +63,4 @@
   __cpuid(regs, 0);
 }
 
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov al, v;
-      xchg al, byte ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov ax, v;
-      xchg ax, word ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov eax, v;
-      xchg eax, dword ptr [edx];
-    }
-  }
-};
-#endif // AMD64
-
 #endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
--- a/src/hotspot/share/classfile/classLoader.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -29,11 +29,11 @@
 #include "runtime/orderAccess.hpp"
 
 // Next entry in class path
-inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
+inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); }
 
 inline void ClassPathEntry::set_next(ClassPathEntry* next) {
   // may have unlocked readers, so ensure visibility.
-  OrderAccess::release_store(&_next, next);
+  Atomic::release_store(&_next, next);
 }
 
 inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
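
next()/set_next() form a release/acquire pair so that readers traversing the class path without a lock never observe a half-constructed entry. The same publication pattern in standalone C++ (illustrative types):

#include <atomic>
#include <string>

struct Entry {
  std::string name;                    // payload, written before publish
  std::atomic<Entry*> next{nullptr};
};

// Writer: all stores to *e happen-before the release store, so a
// reader that sees the pointer also sees the initialized payload.
void publish(std::atomic<Entry*>& link, Entry* e) {
  link.store(e, std::memory_order_release);
}

Entry* first(const std::atomic<Entry*>& head) {
  return head.load(std::memory_order_acquire);  // pairs with publish
}
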
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -187,11 +187,11 @@
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store(&_head, next);
+    Atomic::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
-  OrderAccess::release_store(&_head->_size, _head->_size + 1);
+  Atomic::release_store(&_head->_size, _head->_size + 1);
   return handle;
 }
 
@@ -214,10 +214,10 @@
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = OrderAccess::load_acquire(&_head);
+  Chunk* head = Atomic::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
-    oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
+    oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
     for (Chunk* c = head->_next; c != NULL; c = c->_next) {
       oops_do_chunk(f, c, c->_size);
     }
@@ -326,7 +326,7 @@
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
@@ -334,7 +334,7 @@
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
@@ -342,7 +342,7 @@
 
 void ClassLoaderData::methods_do(void f(Method*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -351,7 +351,7 @@
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 #ifdef ASSERT
@@ -366,7 +366,7 @@
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -465,7 +465,7 @@
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store(&_klasses, k);
+    Atomic::release_store(&_klasses, k);
     if (k->is_array_klass()) {
       ClassLoaderDataGraph::inc_array_classes(1);
     } else {
@@ -552,7 +552,7 @@
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
   // Lock-free access requires load_acquire.
-  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
+  ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
@@ -562,7 +562,7 @@
       {
         MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
         // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store(&_modules, modules);
+        Atomic::release_store(&_modules, modules);
       }
     }
   }
@@ -752,7 +752,7 @@
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
   // Lock-free access requires load_acquire.
-  ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
+  ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLocker ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
@@ -768,7 +768,7 @@
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store(&_metaspace, metaspace);
+      Atomic::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
@@ -969,7 +969,7 @@
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
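
ChunkedHandleList::add/oops_do above show the companion idiom for arrays: fill the slot, then publish the new element count with a release store, so a reader that acquires _size == n may safely scan slots [0, n). A standalone sketch (single writer, illustrative names):

#include <atomic>
#include <cstddef>

struct Chunk {
  static const std::size_t CAPACITY = 32;
  void* data[CAPACITY];
  std::atomic<std::size_t> size{0};

  void add(void* v) {   // one writer, any number of readers
    std::size_t n = size.load(std::memory_order_relaxed);
    data[n] = v;        // fill the slot first...
    size.store(n + 1, std::memory_order_release);  // ...then publish
  }

  template<typename F>
  void for_each(F f) const {
    std::size_t n = size.load(std::memory_order_acquire);
    for (std::size_t i = 0; i < n; i++) f(data[i]);
  }
};
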
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -59,13 +59,13 @@
   //
   // Any ClassLoaderData added after or during walking the list are prepended to
   // _head. Their claim mark need not be handled here.
-  for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
+  for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
     cld->clear_claim();
   }
 }
 
 void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
- for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
+ for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
     cld->clear_claim(claim);
   }
 }
@@ -220,7 +220,7 @@
 
   // First install the new CLD to the Graph.
   cld->set_next(_head);
-  OrderAccess::release_store(&_head, cld);
+  Atomic::release_store(&_head, cld);
 
   // Next associate with the class_loader.
   if (!is_unsafe_anonymous) {
--- a/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -113,7 +113,7 @@
 //-----------------------------------------------------------------------------
 
 ExceptionCache* CompiledMethod::exception_cache_acquire() const {
-  return OrderAccess::load_acquire(&_exception_cache);
+  return Atomic::load_acquire(&_exception_cache);
 }
 
 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
--- a/src/hotspot/share/code/compiledMethod.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -61,7 +61,7 @@
 
 // class ExceptionCache methods
 
-inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); }
+inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); }
 
 address ExceptionCache::pc_at(int index) {
   assert(index >= 0 && index < count(),"");
@@ -74,7 +74,7 @@
 }
 
 // increment_count is only called under lock, but there may be concurrent readers.
-inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); }
+inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); }
 
 
 #endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP
--- a/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -281,7 +281,7 @@
 nmethodBucket* DependencyContext::dependencies_not_unloading() {
   for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
-    nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
+    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
     if (head == NULL || !head->get_nmethod()->is_unloading()) {
       return head;
     }
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -158,13 +158,13 @@
 }
 
 G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-  return OrderAccess::load_acquire(&_table);
+  return Atomic::load_acquire(&_table);
 }
 
 void G1CodeRootSet::allocate_small_table() {
   G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
 
-  OrderAccess::release_store(&_table, temp);
+  Atomic::release_store(&_table, temp);
 }
 
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
@@ -194,7 +194,7 @@
 
   G1CodeRootSetTable::purge_list_append(_table);
 
-  OrderAccess::release_store(&_table, temp);
+  Atomic::release_store(&_table, temp);
 }
 
 void G1CodeRootSet::purge() {
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -219,7 +219,7 @@
       // some mark bits may not yet seem cleared or a 'later' update
       // performed by a concurrent thread could be undone when the
       // zeroing becomes visible). This requires store ordering.
-      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
+      Atomic::release_store(&_fine_grain_regions[ind], prt);
       _n_fine_entries++;
 
       // Transfer from sparse to fine-grain.
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -190,7 +190,7 @@
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }
 
-  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
+  HeapRegion* hr() const { return Atomic::load_acquire(&_hr); }
 
   jint occupied() const {
     // Overkill, but if we ever need it...
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -65,7 +65,7 @@
   _bm.clear();
   // Make sure that the bitmap clearing above has been finished before publishing
   // this PRT to concurrent threads.
-  OrderAccess::release_store(&_hr, hr);
+  Atomic::release_store(&_hr, hr);
 }
 
 template <class Closure>
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -27,14 +27,14 @@
 
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
   volatile CardValue* byte = _card_table->byte_for(field);
   if (_card_table->scanned_concurrently()) {
     // Perform a releasing store if the card table is scanned concurrently
-    OrderAccess::release_store(byte, CardTable::dirty_card_val());
+    Atomic::release_store(byte, CardTable::dirty_card_val());
   } else {
     *byte = CardTable::dirty_card_val();
   }
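
Card marking only needs the releasing store when another thread may scan the table concurrently; otherwise a plain store is cheaper. The same branch in standalone form (dirty value illustrative):

#include <atomic>
#include <cstdint>

void mark_card(std::atomic<uint8_t>& card, bool scanned_concurrently) {
  const uint8_t dirty = 0;   // illustrative dirty_card_val()
  if (scanned_concurrently) {
    // Order the preceding reference-field write before the card
    // becomes visibly dirty to a concurrent scanner.
    card.store(dirty, std::memory_order_release);
  } else {
    card.store(dirty, std::memory_order_relaxed);
  }
}
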
--- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -51,7 +51,7 @@
 
   // Signal thread has terminated
   MonitorLocker ml(Terminator_lock);
-  OrderAccess::release_store(&_has_terminated, true);
+  Atomic::release_store(&_has_terminated, true);
   ml.notify_all();
 }
 
@@ -60,7 +60,7 @@
   assert(!has_terminated(), "Invalid state");
 
   // Signal thread to terminate
-  OrderAccess::release_store_fence(&_should_terminate, true);
+  Atomic::release_store_fence(&_should_terminate, true);
 
   stop_service();
 
@@ -72,9 +72,9 @@
 }
 
 bool ConcurrentGCThread::should_terminate() const {
-  return OrderAccess::load_acquire(&_should_terminate);
+  return Atomic::load_acquire(&_should_terminate);
 }
 
 bool ConcurrentGCThread::has_terminated() const {
-  return OrderAccess::load_acquire(&_has_terminated);
+  return Atomic::load_acquire(&_has_terminated);
 }
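
The termination handshake is a plain stop-flag: the requester publishes with release_store_fence (store plus full fence, so the service thread cannot miss it while going to sleep), and the service loop polls with load_acquire. Standalone sketch (illustrative):

#include <atomic>

static std::atomic<bool> g_should_terminate{false};

void request_stop() {
  // release_store_fence ~= release store followed by a full fence.
  g_should_terminate.store(true, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

bool should_terminate() {
  return g_should_terminate.load(std::memory_order_acquire);
}
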
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -140,7 +140,7 @@
 }
 
 size_t OopStorage::ActiveArray::block_count_acquire() const {
-  return OrderAccess::load_acquire(&_block_count);
+  return Atomic::load_acquire(&_block_count);
 }
 
 void OopStorage::ActiveArray::increment_refcount() const {
@@ -161,7 +161,7 @@
     *block_ptr(index) = block;
     // Use a release_store to ensure all the setup is complete before
     // making the block visible.
-    OrderAccess::release_store(&_block_count, index + 1);
+    Atomic::release_store(&_block_count, index + 1);
     return true;
   } else {
     return false;
@@ -264,8 +264,8 @@
 bool OopStorage::Block::is_safe_to_delete() const {
   assert(is_empty(), "precondition");
   OrderAccess::loadload();
-  return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
-         (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
+  return (Atomic::load_acquire(&_release_refcount) == 0) &&
+         (Atomic::load_acquire(&_deferred_updates_next) == NULL);
 }
 
 OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
@@ -514,7 +514,7 @@
   // Update new_array refcount to account for the new reference.
   new_array->increment_refcount();
   // Install new_array, ensuring its initialization is complete first.
-  OrderAccess::release_store(&_active_array, new_array);
+  Atomic::release_store(&_active_array, new_array);
   // Wait for any readers that could read the old array from _active_array.
   // Can't use GlobalCounter here, because this is called from allocate(),
   // which may be called in the scope of a GlobalCounter critical section
@@ -532,7 +532,7 @@
 // using it.
 OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
   SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
-  ActiveArray* result = OrderAccess::load_acquire(&_active_array);
+  ActiveArray* result = Atomic::load_acquire(&_active_array);
   result->increment_refcount();
   return result;
 }
@@ -645,7 +645,7 @@
   // Atomically pop a block off the list, if any available.
   // No ABA issue because this is only called by one thread at a time.
  // The atomicity is w.r.t. pushes by release().
-  Block* block = OrderAccess::load_acquire(&_deferred_updates);
+  Block* block = Atomic::load_acquire(&_deferred_updates);
   while (true) {
     if (block == NULL) return false;
     // Try atomic pop of block from list.
@@ -833,23 +833,23 @@
 void OopStorage::record_needs_cleanup() {
   // Set local flag first, else service thread could wake up and miss
   // the request.  This order may instead (rarely) unnecessarily notify.
-  OrderAccess::release_store(&_needs_cleanup, true);
-  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
+  Atomic::release_store(&_needs_cleanup, true);
+  Atomic::release_store_fence(&needs_cleanup_requested, true);
 }
 
 bool OopStorage::delete_empty_blocks() {
   // Service thread might have oopstorage work, but not for this object.
   // Check for deferred updates even though that's not a service thread
   // trigger; since we're here, we might as well process them.
-  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
-      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
+  if (!Atomic::load_acquire(&_needs_cleanup) &&
+      (Atomic::load_acquire(&_deferred_updates) == NULL)) {
     return false;
   }
 
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
   // Clear the request before processing.
-  OrderAccess::release_store_fence(&_needs_cleanup, false);
+  Atomic::release_store_fence(&_needs_cleanup, false);
 
   // Other threads could be adding to the empty block count or the
   // deferred update list while we're working.  Set an upper bound on
@@ -993,7 +993,7 @@
 
 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
   data->_processed += data->_segment_end - data->_segment_start;
-  size_t start = OrderAccess::load_acquire(&_next_block);
+  size_t start = Atomic::load_acquire(&_next_block);
   if (start >= _block_count) {
     return finish_iteration(data); // No more blocks available.
   }
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -224,7 +224,7 @@
     log_trace(gc, ptrqueue, freelist)
              ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
   }
-  OrderAccess::release_store(&_transfer_lock, false);
+  Atomic::release_store(&_transfer_lock, false);
   return true;
 }
 
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -68,7 +68,7 @@
     // assignment.  However, casting to E& means that we trigger an
     // unused-value warning.  So, we cast the E& to void.
     (void)const_cast<E&>(_elems[localBot] = t);
-    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    Atomic::release_store(&_bottom, increment_index(localBot));
     TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   }
@@ -89,7 +89,7 @@
     // assignment.  However, casting to E& means that we trigger an
     // unused-value warning.  So, we cast the E& to void.
     (void) const_cast<E&>(_elems[localBot] = t);
-    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    Atomic::release_store(&_bottom, increment_index(localBot));
     TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   } else {
@@ -210,7 +210,7 @@
 #ifndef CPU_MULTI_COPY_ATOMIC
   OrderAccess::fence();
 #endif
-  uint localBot = OrderAccess::load_acquire(&_bottom);
+  uint localBot = Atomic::load_acquire(&_bottom);
   uint n_elems = size(localBot, oldAge.top());
   if (n_elems == 0) {
     return false;
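
In the work-stealing queue, the owner's push writes the element before advancing _bottom with a release store; a thief that load-acquires _bottom therefore also sees the element. A much-simplified standalone sketch of just that pairing (the real deque also maintains an age/top word and handles overflow):

#include <atomic>
#include <cstdint>

struct Deque {
  static const uint32_t N = 1024;
  void* elems[N];
  std::atomic<uint32_t> bottom{0};

  void push(void* t) {                 // owner thread only
    uint32_t b = bottom.load(std::memory_order_relaxed);
    elems[b % N] = t;                  // element first...
    bottom.store(b + 1, std::memory_order_release);  // ...then index
  }

  uint32_t snapshot_bottom() const {   // thief side
    return bottom.load(std::memory_order_acquire);
  }
};
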
--- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -38,7 +38,7 @@
 }
 
 void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
-  while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
+  while ((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
     os::naked_short_sleep(1);
   }
   // At this point we are sure that no threads can evacuate anything. Raise
@@ -48,7 +48,7 @@
 }
 
 void ShenandoahEvacOOMHandler::enter_evacuation() {
-  jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+  jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
 
   assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
   assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
@@ -79,7 +79,7 @@
 
 void ShenandoahEvacOOMHandler::leave_evacuation() {
   if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
-    assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
+    assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
     // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
     Atomic::dec(&_threads_in_evac);
   } else {
@@ -96,7 +96,7 @@
   assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
   assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
 
-  jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+  jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
   while (true) {
     jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
                                   &_threads_in_evac, threads_in_evac);
@@ -113,8 +113,8 @@
 
 void ShenandoahEvacOOMHandler::clear() {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
-  assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
-  OrderAccess::release_store_fence<jint>(&_threads_in_evac, 0);
+  assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
+  Atomic::release_store_fence<jint>(&_threads_in_evac, 0);
 }
 
 ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -601,7 +601,7 @@
 }
 
 size_t ShenandoahHeap::used() const {
-  return OrderAccess::load_acquire(&_used);
+  return Atomic::load_acquire(&_used);
 }
 
 size_t ShenandoahHeap::committed() const {
@@ -624,7 +624,7 @@
 }
 
 void ShenandoahHeap::set_used(size_t bytes) {
-  OrderAccess::release_store_fence(&_used, bytes);
+  Atomic::release_store_fence(&_used, bytes);
 }
 
 void ShenandoahHeap::decrease_used(size_t bytes) {
@@ -2114,11 +2114,11 @@
 }
 
 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
-  return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
+  return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
 }
 
 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
-  OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
+  Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
 }
 
 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -305,7 +305,7 @@
 }
 
 void ShenandoahHeapRegion::clear_live_data() {
-  OrderAccess::release_store_fence<size_t>(&_live_data, 0);
+  Atomic::release_store_fence<size_t>(&_live_data, 0);
 }
 
 void ShenandoahHeapRegion::reset_alloc_metadata() {
@@ -351,7 +351,7 @@
 }
 
 size_t ShenandoahHeapRegion::get_live_data_words() const {
-  return OrderAccess::load_acquire(&_live_data);
+  return Atomic::load_acquire(&_live_data);
 }
 
 size_t ShenandoahHeapRegion::get_live_data_bytes() const {
--- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -47,19 +47,19 @@
   }
 
   void set() {
-    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)SET);
   }
 
   void unset() {
-    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
   }
 
   bool is_set() const {
-    return OrderAccess::load_acquire(&value) == SET;
+    return Atomic::load_acquire(&value) == SET;
   }
 
   bool is_unset() const {
-    return OrderAccess::load_acquire(&value) == UNSET;
+    return Atomic::load_acquire(&value) == UNSET;
   }
 
   void set_cond(bool val) {
@@ -118,7 +118,7 @@
     assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
     ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
     while (true) {
-      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      ShenandoahSharedValue ov = Atomic::load_acquire(&value);
       if ((ov & mask_val) != 0) {
         // already set
         return;
@@ -136,7 +136,7 @@
     assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
     ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
     while (true) {
-      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      ShenandoahSharedValue ov = Atomic::load_acquire(&value);
       if ((ov & mask_val) == 0) {
         // already unset
         return;
@@ -151,7 +151,7 @@
   }
 
   void clear() {
-    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
   }
 
   bool is_set(uint mask) const {
@@ -160,11 +160,11 @@
 
   bool is_unset(uint mask) const {
     assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
-    return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
+    return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
   }
 
   bool is_clear() const {
-    return (OrderAccess::load_acquire(&value)) == 0;
+    return (Atomic::load_acquire(&value)) == 0;
   }
 
   void set_cond(uint mask, bool val) {
@@ -211,11 +211,11 @@
   void set(T v) {
     assert (v >= 0, "sanity");
     assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
-    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v);
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)v);
   }
 
   T get() const {
-    return (T)OrderAccess::load_acquire(&value);
+    return (T)Atomic::load_acquire(&value);
   }
 
   T cmpxchg(T new_value, T expected) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -756,12 +756,12 @@
       if (r->is_humongous()) {
         // For humongous objects, test if start region is marked live, and if so,
         // all humongous regions in that chain have live data equal to their "used".
-        juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]);
+        juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->region_number()]);
         if (start_live > 0) {
           verf_live = (juint)(r->used() / HeapWordSize);
         }
       } else {
-        verf_live = OrderAccess::load_acquire(&ld[r->region_number()]);
+        verf_live = Atomic::load_acquire(&ld[r->region_number()]);
       }
 
       size_t reg_live = r->get_live_data_words();
--- a/src/hotspot/share/gc/z/zLiveMap.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -54,9 +54,9 @@
 
   // Multiple threads can enter here, make sure only one of them
   // resets the marking information while the others busy wait.
-  for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum);
+  for (uint32_t seqnum = Atomic::load_acquire(&_seqnum);
        seqnum != ZGlobalSeqNum;
-       seqnum = OrderAccess::load_acquire(&_seqnum)) {
+       seqnum = Atomic::load_acquire(&_seqnum)) {
     if ((seqnum != seqnum_initializing) &&
         (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
       // Reset marking information
@@ -73,7 +73,7 @@
       // before the update of the page seqnum, such that when the
       // up-to-date seqnum is load acquired, the bit maps will not
       // contain stale information.
-      OrderAccess::release_store(&_seqnum, ZGlobalSeqNum);
+      Atomic::release_store(&_seqnum, ZGlobalSeqNum);
       break;
     }
 
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -39,7 +39,7 @@
 }
 
 inline bool ZLiveMap::is_marked() const {
-  return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum;
+  return Atomic::load_acquire(&_seqnum) == ZGlobalSeqNum;
 }
 
 inline uint32_t ZLiveMap::live_objects() const {
--- a/src/hotspot/share/gc/z/zNMethodData.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zNMethodData.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -78,7 +78,7 @@
 }
 
 ZNMethodDataOops* ZNMethodData::oops() const {
-  return OrderAccess::load_acquire(&_oops);
+  return Atomic::load_acquire(&_oops);
 }
 
 ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -82,7 +82,7 @@
                                                         size_t size,
                                                         ZAllocationFlags flags) {
   uintptr_t addr = 0;
-  ZPage* page = OrderAccess::load_acquire(shared_page);
+  ZPage* page = Atomic::load_acquire(shared_page);
 
   if (page != NULL) {
     addr = page->alloc_object_atomic(size);
@@ -304,7 +304,7 @@
 size_t ZObjectAllocator::remaining() const {
   assert(ZThread::is_java(), "Should be a Java thread");
 
-  const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr());
+  const ZPage* const page = Atomic::load_acquire(shared_small_page_addr());
   if (page != NULL) {
     return page->remaining();
   }
--- a/src/hotspot/share/interpreter/oopMapCache.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -448,7 +448,7 @@
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return OrderAccess::load_acquire(&(_array[i % _size]));
+  return Atomic::load_acquire(&(_array[i % _size]));
 }
 
 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -27,7 +27,7 @@
 
 #include "jfr/utilities/jfrTypes.hpp"
 #include "memory/allocation.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 
 #define USED_BIT                             1
 #define METHOD_USED_BIT                      (USED_BIT << 2)
@@ -91,16 +91,16 @@
   }
 
   static bool has_changed_tag_state() {
-    if (OrderAccess::load_acquire(&_tag_state)) {
-      OrderAccess::release_store(&_tag_state, false);
+    if (Atomic::load_acquire(&_tag_state)) {
+      Atomic::release_store(&_tag_state, false);
       return true;
     }
     return false;
   }
 
   static void set_changed_tag_state() {
-    if (!OrderAccess::load_acquire(&_tag_state)) {
-      OrderAccess::release_store(&_tag_state, true);
+    if (!Atomic::load_acquire(&_tag_state)) {
+      Atomic::release_store(&_tag_state, true);
     }
   }
 };
--- a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -46,19 +46,19 @@
 
 inline void set_generation(uint64_t value, uint64_t* const dest) {
   assert(dest != NULL, "invariant");
-  OrderAccess::release_store(dest, value);
+  Atomic::release_store(dest, value);
 }
 static void increment_store_generation() {
-  const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
-  const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
+  const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
+  const uint64_t current_stored = Atomic::load_acquire(&store_generation);
   if (current_serialized == current_stored) {
     set_generation(current_serialized + 1, &store_generation);
   }
 }
 
 static bool increment_serialized_generation() {
-  const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
-  const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
+  const uint64_t current_stored = Atomic::load_acquire(&store_generation);
+  const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
   if (current_stored != current_serialized) {
     set_generation(current_stored, &serialized_generation);
     return true;
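
The two generation counters above act as an edge-triggered dirty flag: a store bumps store_generation only once the serializer has caught up, and the serializer advances serialized_generation to match, reporting whether anything was pending. An illustrative trace (values are made up):

    // store=0, serialized=0  increment_store_generation()      -> store=1
    // store=1, serialized=0  increment_store_generation()      -> no-op, serializer behind
    // store=1, serialized=0  increment_serialized_generation() -> serialized=1, returns true
    // store=1, serialized=1  increment_serialized_generation() -> returns false, nothing pending

The acquire loads keep each side from acting on a stale counter published by the other side's release store in set_generation().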
--- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -58,9 +58,9 @@
   TableEntry* _entry;
 
   TableEntry* get_entry() const {
-    return (TableEntry*)OrderAccess::load_acquire(&_entry);
+    return (TableEntry*)Atomic::load_acquire(&_entry);
   }
-  void set_entry(TableEntry* entry) { OrderAccess::release_store(&_entry, entry);}
+  void set_entry(TableEntry* entry) { Atomic::release_store(&_entry, entry);}
   TableEntry** entry_addr() { return &_entry; }
 };
 
--- a/src/hotspot/share/logging/logDecorations.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/logging/logDecorations.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -44,7 +44,7 @@
 }
 
 const char* LogDecorations::host_name() {
-  const char* host_name = OrderAccess::load_acquire(&_host_name);
+  const char* host_name = Atomic::load_acquire(&_host_name);
   if (host_name == NULL) {
     char buffer[1024];
     if (os::get_host_name(buffer, sizeof(buffer))) {
--- a/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -128,7 +128,7 @@
 }
 
 size_t MetaspaceGC::capacity_until_GC() {
-  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
+  size_t value = Atomic::load_acquire(&_capacity_until_GC);
   assert(value >= MetaspaceSize, "Not initialized properly?");
   return value;
 }
--- a/src/hotspot/share/oops/accessBackend.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -134,7 +134,7 @@
   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
     OrderAccess::fence();
   }
-  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
 }
 
 template <DecoratorSet decorators>
@@ -142,7 +142,7 @@
 inline typename EnableIf<
   HasDecorator<ds, MO_ACQUIRE>::value, T>::type
 RawAccessBarrier<decorators>::load_internal(void* addr) {
-  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
 }
 
 template <DecoratorSet decorators>
@@ -158,7 +158,7 @@
 inline typename EnableIf<
   HasDecorator<ds, MO_SEQ_CST>::value>::type
 RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
-  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
+  Atomic::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
 }
 
 template <DecoratorSet decorators>
@@ -166,7 +166,7 @@
 inline typename EnableIf<
   HasDecorator<ds, MO_RELEASE>::value>::type
 RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
-  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
+  Atomic::release_store(reinterpret_cast<volatile T*>(addr), value);
 }
 
 template <DecoratorSet decorators>
--- a/src/hotspot/share/oops/array.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/array.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -27,7 +27,7 @@
 
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/align.hpp"
 
 // Array for metadata allocation
@@ -122,8 +122,8 @@
   T*   adr_at(const int i)             { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; }
   int  find(const T& x)                { return index_of(x); }
 
-  T at_acquire(const int i)            { return OrderAccess::load_acquire(adr_at(i)); }
-  void release_at_put(int i, T x)      { OrderAccess::release_store(adr_at(i), x); }
+  T at_acquire(const int i)            { return Atomic::load_acquire(adr_at(i)); }
+  void release_at_put(int i, T x)      { Atomic::release_store(adr_at(i), x); }
 
   static int size(int length) {
     size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
--- a/src/hotspot/share/oops/arrayKlass.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/arrayKlass.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -29,11 +29,11 @@
 #include "oops/arrayKlass.hpp"
 
 inline Klass* ArrayKlass::higher_dimension_acquire() const {
-  return OrderAccess::load_acquire(&_higher_dimension);
+  return Atomic::load_acquire(&_higher_dimension);
 }
 
 inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
-  OrderAccess::release_store(&_higher_dimension, k);
+  Atomic::release_store(&_higher_dimension, k);
 }
 
 #endif // SHARE_OOPS_ARRAYKLASS_INLINE_HPP
--- a/src/hotspot/share/oops/constantPool.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/constantPool.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -232,7 +232,7 @@
   symbol_at_put(name_index, name);
   name->increment_refcount();
   Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store(adr, k);
+  Atomic::release_store(adr, k);
 
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* non-NULL, so we need hardware store ordering here.
@@ -249,7 +249,7 @@
   CPKlassSlot kslot = klass_slot_at(class_index);
   int resolved_klass_index = kslot.resolved_klass_index();
   Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store(adr, k);
+  Atomic::release_store(adr, k);
 
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* non-NULL, so we need hardware store ordering here.
@@ -525,7 +525,7 @@
     trace_class_resolution(this_cp, k);
   }
   Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store(adr, k);
+  Atomic::release_store(adr, k);
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* stored in _resolved_klasses is non-NULL, so we need
   // hardware store ordering here.
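
All three constantPool.cpp hunks follow the comment that accompanies them: the resolved Klass* must be release-stored before the tag is flipped to a resolved state, because the interpreter reads the tag first and then the klass slot without a lock. Schematically (writer and reader condensed; not the verbatim surrounding code):

    // Writer (class resolution):
    Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
    Atomic::release_store(adr, k);                        // 1: publish the Klass*
    release_tag_at_put(class_index, JVM_CONSTANT_Class);  // 2: ordered after (1)

    // Reader (interpreter):
    if (tag_at(class_index).is_klass()) {                 // observes (2) ...
      Klass* k = Atomic::load_acquire(adr);               // ... so (1) is visible:
      assert(k != NULL, "resolved tag implies non-NULL klass");
    }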
--- a/src/hotspot/share/oops/constantPool.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/constantPool.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -33,7 +33,7 @@
   assert(is_within_bounds(which), "index out of bounds");
   assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
   // Uses volatile because the klass slot changes without a lock.
-  intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
+  intptr_t adr = Atomic::load_acquire(obj_at_addr(which));
   assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
   return CPSlot(adr);
 }
@@ -46,7 +46,7 @@
   assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
 
   Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
-  return OrderAccess::load_acquire(adr);
+  return Atomic::load_acquire(adr);
 }
 
 inline bool ConstantPool::is_pseudo_string_at(int which) {
--- a/src/hotspot/share/oops/cpCache.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/cpCache.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -97,7 +97,7 @@
   assert(c == 0 || c == code || code == 0, "update must be consistent");
 #endif
   // Need to flush pending stores here before bytecode is written.
-  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
+  Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
 }
 
 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@@ -107,17 +107,17 @@
   assert(c == 0 || c == code || code == 0, "update must be consistent");
 #endif
   // Need to flush pending stores here before bytecode is written.
-  OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
+  Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
 }
 
 // Sets f1, ordering with previous writes.
 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
   assert(f1 != NULL, "");
-  OrderAccess::release_store(&_f1, f1);
+  Atomic::release_store(&_f1, f1);
 }
 
 void ConstantPoolCacheEntry::set_indy_resolution_failed() {
-  OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
+  Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
 }
 
 // Note that concurrent update of both bytecodes can leave one of them
--- a/src/hotspot/share/oops/cpCache.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/cpCache.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -29,7 +29,7 @@
 #include "oops/oopHandle.inline.hpp"
 #include "runtime/orderAccess.hpp"
 
-inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); }
+inline int ConstantPoolCacheEntry::indices_ord() const { return Atomic::load_acquire(&_indices); }
 
 inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const {
   return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask);
@@ -53,7 +53,7 @@
   return (Method*)_f2;
 }
 
-inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
+inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); }
 
 inline Method* ConstantPoolCacheEntry::f1_as_method() const {
   Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), "");
@@ -75,7 +75,7 @@
   return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0;
 }
 
-inline intx ConstantPoolCacheEntry::flags_ord() const   { return (intx)OrderAccess::load_acquire(&_flags); }
+inline intx ConstantPoolCacheEntry::flags_ord() const   { return (intx)Atomic::load_acquire(&_flags); }
 
 inline bool ConstantPoolCacheEntry::indy_resolution_failed() const {
   intx flags = flags_ord();
--- a/src/hotspot/share/oops/instanceKlass.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -1097,7 +1097,7 @@
     return NULL;
   } else {
     // This load races with inserts, and therefore needs acquire.
-    Klass* kls = OrderAccess::load_acquire(k);
+    Klass* kls = Atomic::load_acquire(k);
     if (kls != NULL && !kls->is_loader_alive()) {
       return NULL;  // don't return unloaded class
     } else {
@@ -1113,7 +1113,7 @@
   Klass* volatile* addr = adr_implementor();
   assert(addr != NULL, "null addr");
   if (addr != NULL) {
-    OrderAccess::release_store(addr, k);
+    Atomic::release_store(addr, k);
   }
 }
 
@@ -1370,14 +1370,14 @@
   InterpreterOopMap* entry_for) {
   // Lazily create the _oop_map_cache at first request
   // Lock-free access requires load_acquire.
-  OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
+  OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
   if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
     // Check if _oop_map_cache was allocated while we were waiting for this lock
     if ((oop_map_cache = _oop_map_cache) == NULL) {
       oop_map_cache = new OopMapCache();
       // Ensure _oop_map_cache is stable, since it is examined without a lock
-      OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
+      Atomic::release_store(&_oop_map_cache, oop_map_cache);
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
@@ -2114,7 +2114,7 @@
     // The jmethodID cache can be read while unlocked so we have to
     // make sure the new jmethodID is complete before installing it
     // in the cache.
-    OrderAccess::release_store(&jmeths[idnum+1], id);
+    Atomic::release_store(&jmeths[idnum+1], id);
   } else {
     *to_dealloc_id_p = new_id; // save new id for later delete
   }
@@ -2196,7 +2196,7 @@
     assert (ClassUnloading, "only called for ClassUnloading");
     for (;;) {
       // Use load_acquire due to competing with inserts
-      Klass* impl = OrderAccess::load_acquire(adr_implementor());
+      Klass* impl = Atomic::load_acquire(adr_implementor());
       if (impl != NULL && !impl->is_loader_alive()) {
         // NULL this field, might be an unloaded klass or NULL
         Klass* volatile* klass = adr_implementor();
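
The _oop_map_cache hunk above is the double-checked locking shape that recurs throughout this changeset (memoryManager.cpp and memoryPool.cpp below are the same pattern): acquire on the unlocked fast-path read, re-check under the lock, release on publication. Reduced to a generic sketch (Cache, Cache_lock and the constructor are placeholders, not HotSpot types):

    Cache* volatile _cache = NULL;

    Cache* get_or_create_cache() {
      // Fast path: pairs with the release_store below.
      Cache* c = Atomic::load_acquire(&_cache);
      if (c == NULL) {
        MutexLocker ml(Cache_lock);
        c = _cache;                        // re-check: another thread may have won
        if (c == NULL) {
          c = new Cache();
          Atomic::release_store(&_cache, c);  // publish only when fully constructed
        }
      }
      return c;
    }

Without the release, a non-TSO machine could make the pointer visible before the constructor's stores; without the acquire, the fast-path reader could see the pointer but stale fields.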
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -35,19 +35,19 @@
 #include "utilities/macros.hpp"
 
 inline Klass* InstanceKlass::array_klasses_acquire() const {
-  return OrderAccess::load_acquire(&_array_klasses);
+  return Atomic::load_acquire(&_array_klasses);
 }
 
 inline void InstanceKlass::release_set_array_klasses(Klass* k) {
-  OrderAccess::release_store(&_array_klasses, k);
+  Atomic::release_store(&_array_klasses, k);
 }
 
 inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
-  return OrderAccess::load_acquire(&_methods_jmethod_ids);
+  return Atomic::load_acquire(&_methods_jmethod_ids);
 }
 
 inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
-  OrderAccess::release_store(&_methods_jmethod_ids, jmeths);
+  Atomic::release_store(&_methods_jmethod_ids, jmeths);
 }
 
 // The iteration over the oops in objects is a hot path in the GC code.
--- a/src/hotspot/share/oops/klass.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/klass.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -362,7 +362,7 @@
 Klass* Klass::subklass(bool log) const {
   // Need load_acquire on the _subklass, because it races with inserts that
   // publishes freshly initialized data.
-  for (Klass* chain = OrderAccess::load_acquire(&_subklass);
+  for (Klass* chain = Atomic::load_acquire(&_subklass);
        chain != NULL;
        // Do not need load_acquire on _next_sibling, because inserts never
        // create _next_sibling edges to dead data.
@@ -402,7 +402,7 @@
 
 void Klass::set_subklass(Klass* s) {
   assert(s != this, "sanity check");
-  OrderAccess::release_store(&_subklass, s);
+  Atomic::release_store(&_subklass, s);
 }
 
 void Klass::set_next_sibling(Klass* s) {
@@ -427,7 +427,7 @@
   super->clean_subklass();
 
   for (;;) {
-    Klass* prev_first_subklass = OrderAccess::load_acquire(&_super->_subklass);
+    Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass);
     if (prev_first_subklass != NULL) {
       // set our sibling to be the superklass' previous first subklass
       assert(prev_first_subklass->is_loader_alive(), "May not attach klasses that are not alive");
@@ -446,7 +446,7 @@
 void Klass::clean_subklass() {
   for (;;) {
     // Need load_acquire, due to contending with concurrent inserts
-    Klass* subklass = OrderAccess::load_acquire(&_subklass);
+    Klass* subklass = Atomic::load_acquire(&_subklass);
     if (subklass == NULL || subklass->is_loader_alive()) {
       return;
     }
--- a/src/hotspot/share/oops/method.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/method.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -1247,7 +1247,7 @@
 }
 
 address Method::from_compiled_entry_no_trampoline() const {
-  CompiledMethod *code = OrderAccess::load_acquire(&_code);
+  CompiledMethod *code = Atomic::load_acquire(&_code);
   if (code) {
     return code->verified_entry_point();
   } else {
@@ -1273,7 +1273,7 @@
 // Not inline to avoid circular ref.
 bool Method::check_code() const {
   // cached in a register or local.  There's a race on the value of the field.
-  CompiledMethod *code = OrderAccess::load_acquire(&_code);
+  CompiledMethod *code = Atomic::load_acquire(&_code);
   return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
 }
 
--- a/src/hotspot/share/oops/method.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/method.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -29,23 +29,23 @@
 #include "runtime/orderAccess.hpp"
 
 inline address Method::from_compiled_entry() const {
-  return OrderAccess::load_acquire(&_from_compiled_entry);
+  return Atomic::load_acquire(&_from_compiled_entry);
 }
 
 inline address Method::from_interpreted_entry() const {
-  return OrderAccess::load_acquire(&_from_interpreted_entry);
+  return Atomic::load_acquire(&_from_interpreted_entry);
 }
 
 inline void Method::set_method_data(MethodData* data) {
   // The store into method must be released. On platforms without
   // total store order (TSO) the reference may become visible before
   // the initialization of data otherwise.
-  OrderAccess::release_store(&_method_data, data);
+  Atomic::release_store(&_method_data, data);
 }
 
 inline CompiledMethod* volatile Method::code() const {
   assert( check_code(), "" );
-  return OrderAccess::load_acquire(&_code);
+  return Atomic::load_acquire(&_code);
 }
 
 // Write (bci, line number) pair to stream
--- a/src/hotspot/share/oops/methodData.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/methodData.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -1415,7 +1415,7 @@
 
   for (;; dp = next_extra(dp)) {
     assert(dp < end, "moved past end of extra data");
-    // No need for "OrderAccess::load_acquire" ops,
+    // No need for "Atomic::load_acquire" ops,
     // since the data structure is monotonic.
     switch(dp->tag()) {
     case DataLayout::no_tag:
@@ -1550,7 +1550,7 @@
   DataLayout* end   = args_data_limit();
   for (;; dp = next_extra(dp)) {
     assert(dp < end, "moved past end of extra data");
-    // No need for "OrderAccess::load_acquire" ops,
+    // No need for "Atomic::load_acquire" ops,
     // since the data structure is monotonic.
     switch(dp->tag()) {
     case DataLayout::no_tag:
--- a/src/hotspot/share/oops/methodData.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/methodData.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -29,7 +29,7 @@
 #include "runtime/orderAccess.hpp"
 
 inline void DataLayout::release_set_cell_at(int index, intptr_t value) {
-  OrderAccess::release_store(&_cells[index], value);
+  Atomic::release_store(&_cells[index], value);
 }
 
 inline void ProfileData::release_set_intptr_at(int index, intptr_t value) {
--- a/src/hotspot/share/oops/oop.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -110,9 +110,9 @@
     // Workaround for non-const load_acquire parameter.
     const volatile narrowKlass* addr = &_metadata._compressed_klass;
     volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
-    return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
+    return CompressedKlassPointers::decode(Atomic::load_acquire(xaddr));
   } else {
-    return OrderAccess::load_acquire(&_metadata._klass);
+    return Atomic::load_acquire(&_metadata._klass);
   }
 }
 
@@ -156,10 +156,10 @@
 void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
   CHECK_SET_KLASS(klass);
   if (UseCompressedClassPointers) {
-    OrderAccess::release_store(compressed_klass_addr(mem),
-                               CompressedKlassPointers::encode_not_null(klass));
+    Atomic::release_store(compressed_klass_addr(mem),
+                          CompressedKlassPointers::encode_not_null(klass));
   } else {
-    OrderAccess::release_store(klass_addr(mem), klass);
+    Atomic::release_store(klass_addr(mem), klass);
   }
 }
 
@@ -356,7 +356,7 @@
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
 oop oopDesc::forwardee_acquire() const {
-  return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
+  return (oop) Atomic::load_acquire(&_mark).decode_pointer();
 }
 
 // The following method needs to be MT safe.
--- a/src/hotspot/share/prims/jni.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/prims/jni.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -274,7 +274,7 @@
     uintx count = 0;
 
     while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
-      while (OrderAccess::load_acquire(&JNIHistogram_lock) != 0) {
+      while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
         count +=1;
         if ( (WarnOnStalledSpinLock > 0)
           && (count % WarnOnStalledSpinLock == 0)) {
@@ -3916,7 +3916,7 @@
     *(JNIEnv**)penv = 0;
     // reset vm_created last to avoid race condition. Use OrderAccess to
     // control both compiler and architectural-based reordering.
-    OrderAccess::release_store(&vm_created, 0);
+    Atomic::release_store(&vm_created, 0);
   }
 
   // Flush stdout and stderr before exit.
--- a/src/hotspot/share/prims/jvm.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/prims/jvm.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -234,7 +234,7 @@
     uintx count = 0;
 
     while (Atomic::cmpxchg(1, &JVMHistogram_lock, 0) != 0) {
-      while (OrderAccess::load_acquire(&JVMHistogram_lock) != 0) {
+      while (Atomic::load_acquire(&JVMHistogram_lock) != 0) {
         count +=1;
         if ( (WarnOnStalledSpinLock > 0)
           && (count % WarnOnStalledSpinLock == 0)) {
--- a/src/hotspot/share/prims/jvmtiEnvBase.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -255,11 +255,11 @@
   }
 
   JvmtiTagMap* tag_map_acquire() {
-    return OrderAccess::load_acquire(&_tag_map);
+    return Atomic::load_acquire(&_tag_map);
   }
 
   void release_set_tag_map(JvmtiTagMap* tag_map) {
-    OrderAccess::release_store(&_tag_map, tag_map);
+    Atomic::release_store(&_tag_map, tag_map);
   }
 
   // return true if event is enabled globally or for any thread
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -147,7 +147,7 @@
 
 void JvmtiRawMonitor::simple_exit(Thread* self) {
   guarantee(_owner == self, "invariant");
-  OrderAccess::release_store(&_owner, (Thread*)NULL);
+  Atomic::release_store(&_owner, (Thread*)NULL);
   OrderAccess::fence();
   if (_entry_list == NULL) {
     return;
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -34,6 +34,7 @@
 #include "metaprogramming/primitiveConversions.hpp"
 #include "metaprogramming/removeCV.hpp"
 #include "metaprogramming/removePointer.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -48,6 +49,12 @@
   memory_order_conservative = 8
 };
 
+enum ScopedFenceType {
+    X_ACQUIRE
+  , RELEASE_X
+  , RELEASE_X_FENCE
+};
+
 class Atomic : AllStatic {
 public:
   // Atomic operations on int64 types are not available on all 32-bit
@@ -75,12 +82,21 @@
   template<typename T, typename D>
   inline static void store(T store_value, volatile D* dest);
 
+  template <typename T, typename D>
+  inline static void release_store(volatile D* dest, T store_value);
+
+  template <typename T, typename D>
+  inline static void release_store_fence(volatile D* dest, T store_value);
+
   // Atomically load from a location
   // The type T must be either a pointer type, an integral/enum type,
   // or a type that is primitive convertible using PrimitiveConversions.
   template<typename T>
   inline static T load(const volatile T* dest);
 
+  template <typename T>
+  inline static T load_acquire(const volatile T* dest);
+
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
@@ -200,6 +216,10 @@
   // requires more for e.g. 64 bit loads, a specialization is required
   template<size_t byte_size> struct PlatformLoad;
 
+  // Give platforms a variation point to specialize.
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
+
 private:
   // Dispatch handler for add.  Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
@@ -578,6 +598,32 @@
                atomic_memory_order order) const;
 };
 
+template <ScopedFenceType T>
+class ScopedFenceGeneral: public StackObj {
+ public:
+  void prefix() {}
+  void postfix() {}
+};
+
+// The following methods can be specialized using simple template specialization
+// in the platform specific files for optimization purposes. Otherwise the
+// generalized variant is used.
+
+template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
+
+template <ScopedFenceType T>
+class ScopedFence : public ScopedFenceGeneral<T> {
+  void *const _field;
+ public:
+  ScopedFence(void *const field) : _field(field) { prefix(); }
+  ~ScopedFence() { postfix(); }
+  void prefix() { ScopedFenceGeneral<T>::prefix(); }
+  void postfix() { ScopedFenceGeneral<T>::postfix(); }
+};
+
 // platform specific in-line definitions - must come before shared definitions
 
 #include OS_CPU_HEADER(atomic)
@@ -594,11 +640,44 @@
   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 }
 
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedLoad {
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    ScopedFence<type> f((void*)p);
+    return Atomic::load(p);
+  }
+};
+
+template <typename T>
+inline T Atomic::load_acquire(const volatile T* p) {
+  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
+}
+
 template<typename T, typename D>
 inline void Atomic::store(T store_value, volatile D* dest) {
   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 }
 
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedStore {
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    ScopedFence<type> f((void*)p);
+    Atomic::store(v, p);
+  }
+};
+
+template <typename T, typename D>
+inline void Atomic::release_store(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+}
+
+template <typename T, typename D>
+inline void Atomic::release_store_fence(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+}
+
 template<typename I, typename D>
 inline D Atomic::add(I add_value, D volatile* dest,
                      atomic_memory_order order) {
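
The new Atomic entry points above are a relaxed load/store bracketed by a ScopedFence: X_ACQUIRE issues OrderAccess::acquire() after the load, RELEASE_X issues OrderAccess::release() before the store, and RELEASE_X_FENCE adds a trailing full fence. Platforms may still specialize PlatformOrderedLoad/PlatformOrderedStore to a fused instruction instead (e.g. ldar/stlr on AArch64). A freestanding C++ model of the generic shape, with std::atomic_thread_fence standing in for OrderAccess and volatile accesses standing in for Atomic::load/store:

    #include <atomic>

    template <typename T>
    T load_acquire_model(const volatile T* p) {
      T v = *p;                                             // relaxed load
      std::atomic_thread_fence(std::memory_order_acquire);  // postfix: X_ACQUIRE
      return v;
    }

    template <typename T>
    void release_store_model(volatile T* p, T v) {
      std::atomic_thread_fence(std::memory_order_release);  // prefix: RELEASE_X
      *p = v;                                               // relaxed store
    }

    template <typename T>
    void release_store_fence_model(volatile T* p, T v) {
      release_store_model(p, v);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // postfix: full fence
    }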
--- a/src/hotspot/share/runtime/handshake.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/handshake.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -294,7 +294,7 @@
   if (!_semaphore.trywait()) {
     _semaphore.wait_with_safepoint_check(thread);
   }
-  HandshakeOperation* op = OrderAccess::load_acquire(&_operation);
+  HandshakeOperation* op = Atomic::load_acquire(&_operation);
   if (op != NULL) {
     HandleMark hm(thread);
     CautiouslyPreserveExceptionMark pem(thread);
--- a/src/hotspot/share/runtime/init.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/init.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -195,7 +195,7 @@
 static volatile bool _init_completed = false;
 
 bool is_init_completed() {
-  return OrderAccess::load_acquire(&_init_completed);
+  return Atomic::load_acquire(&_init_completed);
 }
 
 void wait_init_completed() {
@@ -208,6 +208,6 @@
 void set_init_completed() {
   assert(Universe::is_fully_initialized(), "Should have completed initialization");
   MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag);
-  OrderAccess::release_store(&_init_completed, true);
+  Atomic::release_store(&_init_completed, true);
   ml.notify_all();
 }
--- a/src/hotspot/share/runtime/interfaceSupport.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -88,7 +88,7 @@
   uintx count = 0;
 
   while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
-    while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
+    while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
       count +=1;
       if ( (WarnOnStalledSpinLock > 0)
         && (count % WarnOnStalledSpinLock == 0)) {
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -916,8 +916,8 @@
 
     // release semantics: prior loads and stores from within the critical section
     // must not float (reorder) past the following store that drops the lock.
-    OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
-    OrderAccess::storeload();                        // See if we need to wake a successor
+    Atomic::release_store(&_owner, (void*)NULL);   // drop the lock
+    OrderAccess::storeload();                      // See if we need to wake a successor
     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
       return;
     }
@@ -1092,7 +1092,7 @@
   Wakee  = NULL;
 
   // Drop the lock
-  OrderAccess::release_store(&_owner, (void*)NULL);
+  Atomic::release_store(&_owner, (void*)NULL);
   OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
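
Both ObjectMonitor hunks drop the lock with a release store of _owner and only then erect a StoreLoad-or-stronger barrier before deciding whether to wake a successor. The distilled pattern, with the ordering each line buys spelled out (fields as in the hunks; surrounding logic elided):

    Atomic::release_store(&_owner, (void*)NULL);  // drop the lock: critical-section
                                                  // accesses cannot sink below this
    OrderAccess::storeload();                     // the NULL store must be visible
                                                  // before we load _EntryList/_cxq
    if ((intptr_t(_EntryList) | intptr_t(_cxq)) == 0 || _succ != NULL) {
      return;  // nobody is queued, or a successor is already designated
    }

Without the StoreLoad barrier, the load of the queues could be satisfied before other threads observe the unlock, and a waiter could park with nobody left to wake it.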
--- a/src/hotspot/share/runtime/orderAccess.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/orderAccess.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -26,7 +26,6 @@
 #define SHARE_RUNTIME_ORDERACCESS_HPP
 
 #include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
 #include "utilities/macros.hpp"
 
 //                Memory Access Ordering Model
@@ -231,30 +230,7 @@
 // order.  If their implementations change such that these assumptions
 // are violated, a whole lot of code will break.
 
-enum ScopedFenceType {
-    X_ACQUIRE
-  , RELEASE_X
-  , RELEASE_X_FENCE
-};
-
-template <ScopedFenceType T>
-class ScopedFenceGeneral: public StackObj {
- public:
-  void prefix() {}
-  void postfix() {}
-};
-
-template <ScopedFenceType T>
-class ScopedFence : public ScopedFenceGeneral<T> {
-  void *const _field;
- public:
-  ScopedFence(void *const field) : _field(field) { prefix(); }
-  ~ScopedFence() { postfix(); }
-  void prefix() { ScopedFenceGeneral<T>::prefix(); }
-  void postfix() { ScopedFenceGeneral<T>::postfix(); }
-};
-
-class OrderAccess : private Atomic {
+class OrderAccess : public AllStatic {
  public:
   // barriers
   static void     loadload();
@@ -267,85 +243,13 @@
   static void     fence();
 
   static void     cross_modify_fence();
-
-  template <typename T>
-  static T        load_acquire(const volatile T* p);
-
-  template <typename T, typename D>
-  static void     release_store(volatile D* p, T v);
-
-  template <typename T, typename D>
-  static void     release_store_fence(volatile D* p, T v);
-
- private:
+private:
   // This is a helper that invokes the StubRoutines::fence_entry()
   // routine if it exists, It should only be used by platforms that
   // don't have another way to do the inline assembly.
   static void StubRoutines_fence();
-
-  // Give platforms a variation point to specialize.
-  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
-  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
-
-  template<typename FieldType, ScopedFenceType FenceType>
-  static void ordered_store(volatile FieldType* p, FieldType v);
-
-  template<typename FieldType, ScopedFenceType FenceType>
-  static FieldType ordered_load(const volatile FieldType* p);
-};
-
-// The following methods can be specialized using simple template specialization
-// in the platform specific files for optimization purposes. Otherwise the
-// generalized variant is used.
-
-template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedStore {
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    ordered_store<T, type>(p, v);
-  }
-};
-
-template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedLoad {
-  template <typename T>
-  T operator()(const volatile T* p) const {
-    return ordered_load<T, type>(p);
-  }
 };
 
 #include OS_CPU_HEADER(orderAccess)
 
-template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
-
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
-  ScopedFence<FenceType> f((void*)p);
-  Atomic::store(v, p);
-}
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
-  ScopedFence<FenceType> f((void*)p);
-  return Atomic::load(p);
-}
-
-template <typename T>
-inline T OrderAccess::load_acquire(const volatile T* p) {
-  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
-}
 #endif // SHARE_RUNTIME_ORDERACCESS_HPP
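
With the templates gone, OrderAccess is reduced to the pure barrier API and no longer depends on Atomic; the dependency is reversed, since atomic.hpp now includes orderAccess.hpp instead. Call-site migration is mechanical: same arguments, new class. A representative before/after (_field is a hypothetical volatile member, not taken from this patch):

    // Before this change:
    //   Foo* f = OrderAccess::load_acquire(&_field);
    //   OrderAccess::release_store(&_field, f);
    //   OrderAccess::release_store_fence(&_field, f);

    // After:
    Foo* f = Atomic::load_acquire(&_field);
    Atomic::release_store(&_field, f);
    Atomic::release_store_fence(&_field, f);

Bare barriers (OrderAccess::acquire(), release(), fence(), storeload() and friends) are unaffected and stay where they are.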
--- a/src/hotspot/share/runtime/perfMemory.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/perfMemory.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -156,7 +156,7 @@
   _prologue->overflow = 0;
   _prologue->mod_time_stamp = 0;
 
-  OrderAccess::release_store(&_initialized, 1);
+  Atomic::release_store(&_initialized, 1);
 }
 
 void PerfMemory::destroy() {
@@ -269,5 +269,5 @@
 }
 
 bool PerfMemory::is_initialized() {
-  return OrderAccess::load_acquire(&_initialized) != 0;
+  return Atomic::load_acquire(&_initialized) != 0;
 }
--- a/src/hotspot/share/runtime/safepoint.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/safepoint.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -328,7 +328,7 @@
 
   assert((_safepoint_counter & 0x1) == 0, "must be even");
   // The store to _safepoint_counter must happen after any stores in arming.
-  OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
+  Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
 
   // We are synchronizing
   OrderAccess::storestore(); // Ordered with _safepoint_counter
@@ -482,7 +482,7 @@
 
     // Set the next dormant (even) safepoint id.
     assert((_safepoint_counter & 0x1) == 1, "must be odd");
-    OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
+    Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
 
     OrderAccess::fence(); // Keep the local state from floating up.
 
@@ -968,15 +968,15 @@
 }
 
 uint64_t ThreadSafepointState::get_safepoint_id() const {
-  return OrderAccess::load_acquire(&_safepoint_id);
+  return Atomic::load_acquire(&_safepoint_id);
 }
 
 void ThreadSafepointState::reset_safepoint_id() {
-  OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
+  Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
 }
 
 void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) {
-  OrderAccess::release_store(&_safepoint_id, safepoint_id);
+  Atomic::release_store(&_safepoint_id, safepoint_id);
 }
 
 void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {
--- a/src/hotspot/share/runtime/synchronizer.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/synchronizer.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -889,7 +889,7 @@
 // Visitors ...
 
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
+  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
@@ -1118,7 +1118,7 @@
     temp[0]._next_om = g_block_list;
     // There are lock-free uses of g_block_list so make sure that
     // the previous stores happen before we update g_block_list.
-    OrderAccess::release_store(&g_block_list, temp);
+    Atomic::release_store(&g_block_list, temp);
 
     // Add the new string of ObjectMonitors to the global free list
     temp[_BLOCKSIZE - 1]._next_om = g_free_list;
@@ -2169,7 +2169,7 @@
 // the list of extant blocks without taking a lock.
 
 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
-  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
+  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
--- a/src/hotspot/share/runtime/thread.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -1269,7 +1269,7 @@
 
 NonJavaThread::Iterator::Iterator() :
   _protect_enter(_the_list._protect.enter()),
-  _current(OrderAccess::load_acquire(&_the_list._head))
+  _current(Atomic::load_acquire(&_the_list._head))
 {}
 
 NonJavaThread::Iterator::~Iterator() {
@@ -1278,7 +1278,7 @@
 
 void NonJavaThread::Iterator::step() {
   assert(!end(), "precondition");
-  _current = OrderAccess::load_acquire(&_current->_next);
+  _current = Atomic::load_acquire(&_current->_next);
 }
 
 NonJavaThread::NonJavaThread() : Thread(), _next(NULL) {
@@ -1291,8 +1291,8 @@
   MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
   // Initialize BarrierSet-related data before adding to list.
   BarrierSet::barrier_set()->on_thread_attach(this);
-  OrderAccess::release_store(&_next, _the_list._head);
-  OrderAccess::release_store(&_the_list._head, this);
+  Atomic::release_store(&_next, _the_list._head);
+  Atomic::release_store(&_the_list._head, this);
 }
 
 void NonJavaThread::remove_from_the_list() {
--- a/src/hotspot/share/runtime/thread.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/thread.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -67,7 +67,7 @@
 }
 
 inline jlong Thread::cooked_allocated_bytes() {
-  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
+  jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes);
   if (UseTLAB) {
     size_t used_bytes = tlab().used_bytes();
     if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
@@ -87,11 +87,11 @@
 }
 
 inline ThreadsList* Thread::get_threads_hazard_ptr() {
-  return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
+  return (ThreadsList*)Atomic::load_acquire(&_threads_hazard_ptr);
 }
 
 inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
-  OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
+  Atomic::release_store_fence(&_threads_hazard_ptr, new_list);
 }
 
 inline void JavaThread::set_ext_suspended() {
@@ -118,7 +118,7 @@
 #if defined(PPC64) || defined (AARCH64)
   // Use membars when accessing volatile _thread_state. See
   // Threads::create_vm() for size checks.
-  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
+  return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state);
 #else
   return _thread_state;
 #endif
@@ -128,7 +128,7 @@
 #if defined(PPC64) || defined (AARCH64)
   // Use membars when accessing volatile _thread_state. See
   // Threads::create_vm() for size checks.
-  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
+  Atomic::release_store((volatile jint*)&_thread_state, (jint)s);
 #else
   _thread_state = s;
 #endif
@@ -200,7 +200,7 @@
 // The release makes sure this store is done after storing the handshake
 // operation or global state
 inline void JavaThread::set_polling_page_release(void* poll_value) {
-  OrderAccess::release_store(polling_page_addr(), poll_value);
+  Atomic::release_store(polling_page_addr(), poll_value);
 }
 
 // Caller is responsible for using a memory barrier if needed.
@@ -211,14 +211,14 @@
 // The acquire makes sure the read of the polling page is done before
 // reading the handshake operation or the global state
 inline volatile void* JavaThread::get_polling_page() {
-  return OrderAccess::load_acquire(polling_page_addr());
+  return Atomic::load_acquire(polling_page_addr());
 }
 
 inline bool JavaThread::is_exiting() const {
   // Use load-acquire so that setting of _terminated by
   // JavaThread::exit() is seen more quickly.
   TerminatedTypes l_terminated = (TerminatedTypes)
-      OrderAccess::load_acquire((volatile jint *) &_terminated);
+      Atomic::load_acquire((volatile jint *) &_terminated);
   return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
 }
 
@@ -226,19 +226,19 @@
   // Use load-acquire so that setting of _terminated by
   // JavaThread::exit() is seen more quickly.
   TerminatedTypes l_terminated = (TerminatedTypes)
-      OrderAccess::load_acquire((volatile jint *) &_terminated);
+      Atomic::load_acquire((volatile jint *) &_terminated);
   return check_is_terminated(l_terminated);
 }
 
 inline void JavaThread::set_terminated(TerminatedTypes t) {
   // use release-store so the setting of _terminated is seen more quickly
-  OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
+  Atomic::release_store((volatile jint *) &_terminated, (jint) t);
 }
 
 // special for Threads::remove() which is static:
 inline void JavaThread::set_terminated_value() {
   // use release-store so the setting of _terminated is seen more quickly
-  OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
+  Atomic::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
 }
 
 // Allow tracking of class initialization monitor use
--- a/src/hotspot/share/runtime/threadHeapSampler.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/threadHeapSampler.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -139,9 +139,9 @@
 }
 
 int ThreadHeapSampler::get_sampling_interval() {
-  return OrderAccess::load_acquire(&_sampling_interval);
+  return Atomic::load_acquire(&_sampling_interval);
 }
 
 void ThreadHeapSampler::set_sampling_interval(int sampling_interval) {
-  OrderAccess::release_store(&_sampling_interval, sampling_interval);
+  Atomic::release_store(&_sampling_interval, sampling_interval);
 }
--- a/src/hotspot/share/runtime/threadSMR.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -779,7 +779,7 @@
 bool ThreadsSMRSupport::delete_notify() {
   // Use load_acquire() in order to see any updates to _delete_notify
   // earlier than when delete_lock is grabbed.
-  return (OrderAccess::load_acquire(&_delete_notify) != 0);
+  return (Atomic::load_acquire(&_delete_notify) != 0);
 }
 
 // Safely free a ThreadsList after a Threads::add() or Threads::remove().
--- a/src/hotspot/share/runtime/threadSMR.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -78,7 +78,7 @@
 }
 
 inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
-  return (ThreadsList*)OrderAccess::load_acquire(&_java_thread_list);
+  return (ThreadsList*)Atomic::load_acquire(&_java_thread_list);
 }
 
 inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {
--- a/src/hotspot/share/runtime/vmThread.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/vmThread.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -203,16 +203,16 @@
 }
 
 bool VMOperationTimeoutTask::is_armed() {
-  return OrderAccess::load_acquire(&_armed) != 0;
+  return Atomic::load_acquire(&_armed) != 0;
 }
 
 void VMOperationTimeoutTask::arm() {
   _arm_time = os::javaTimeMillis();
-  OrderAccess::release_store_fence(&_armed, 1);
+  Atomic::release_store_fence(&_armed, 1);
 }
 
 void VMOperationTimeoutTask::disarm() {
-  OrderAccess::release_store_fence(&_armed, 0);
+  Atomic::release_store_fence(&_armed, 0);
 }
 
 //------------------------------------------------------------------------------------------------------------------
--- a/src/hotspot/share/services/memoryManager.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/services/memoryManager.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -65,7 +65,7 @@
 instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_mgr_obj points to or implies.
-  instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
+  instanceOop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
   if (mgr_obj == NULL) {
     // It's ok for more than one thread to execute the code up to the locked region.
     // Extra manager instances will just be gc'ed.
@@ -118,7 +118,7 @@
       //
       // The lock has done an acquire, so the load can't float above it, but
       // we need to do a load_acquire as above.
-      mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
+      mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
       if (mgr_obj != NULL) {
          return mgr_obj;
       }
@@ -130,7 +130,7 @@
       // with creating the management object are visible before publishing
       // its address.  The unlock will publish the store to _memory_mgr_obj
       // because it does a release first.
-      OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);
+      Atomic::release_store(&_memory_mgr_obj, mgr_obj);
     }
   }
 
--- a/src/hotspot/share/services/memoryPool.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/services/memoryPool.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -77,7 +77,7 @@
 instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_pool_obj points to or implies.
-  instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
+  instanceOop pool_obj = Atomic::load_acquire(&_memory_pool_obj);
   if (pool_obj == NULL) {
     // It's ok for more than one thread to execute the code up to the locked region.
     // Extra pool instances will just be gc'ed.
@@ -118,7 +118,7 @@
       //
       // The lock has done an acquire, so the load can't float above it,
       // but we need to do a load_acquire as above.
-      pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
+      pool_obj = Atomic::load_acquire(&_memory_pool_obj);
       if (pool_obj != NULL) {
          return pool_obj;
       }
@@ -130,7 +130,7 @@
       // with creating the pool are visible before publishing its address.
       // The unlock will publish the store to _memory_pool_obj because
       // it does a release first.
-      OrderAccess::release_store(&_memory_pool_obj, pool_obj);
+      Atomic::release_store(&_memory_pool_obj, pool_obj);
     }
   }
 
--- a/src/hotspot/share/utilities/bitMap.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/bitMap.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -48,7 +48,7 @@
            memory_order == memory_order_acquire ||
            memory_order == memory_order_conservative,
            "unexpected memory ordering");
-    return OrderAccess::load_acquire(addr);
+    return Atomic::load_acquire(addr);
   }
 }
 
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -58,7 +58,7 @@
 ConcurrentHashTable<CONFIG, F>::
   Node::next() const
 {
-  return OrderAccess::load_acquire(&_next);
+  return Atomic::load_acquire(&_next);
 }
 
 // Bucket
@@ -67,7 +67,7 @@
 ConcurrentHashTable<CONFIG, F>::
   Bucket::first_raw() const
 {
-  return OrderAccess::load_acquire(&_first);
+  return Atomic::load_acquire(&_first);
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -79,7 +79,7 @@
   // Due to this assert this method is not static.
   assert(is_locked(), "Must be locked.");
   Node** tmp = (Node**)dst;
-  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
+  Atomic::release_store(tmp, clear_set_state(node, *dst));
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -88,7 +88,7 @@
   Bucket::first() const
 {
   // We strip the states bit before returning the ptr.
-  return clear_state(OrderAccess::load_acquire(&_first));
+  return clear_state(Atomic::load_acquire(&_first));
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -173,7 +173,7 @@
   assert(is_locked(), "Must be locked.");
   assert(!have_redirect(),
          "Unlocking a bucket after it has reached terminal state.");
-  OrderAccess::release_store(&_first, clear_state(first()));
+  Atomic::release_store(&_first, clear_state(first()));
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -181,7 +181,7 @@
   Bucket::redirect()
 {
   assert(is_locked(), "Must be locked.");
-  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
+  Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 }
 
 // InternalTable
@@ -217,8 +217,8 @@
       _cs_context(GlobalCounter::critical_section_begin(_thread))
 {
   // This version is published now.
-  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
-    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
+  if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
+    Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
   }
 }
 
@@ -289,13 +289,13 @@
   assert(_resize_lock_owner == thread, "Re-size lock not held");
   OrderAccess::fence(); // Prevent below load from floating up.
   // If no reader saw this version we can skip write_synchronize.
-  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
+  if (Atomic::load_acquire(&_invisible_epoch) == thread) {
     return;
   }
   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
   // We set this/next version that we are synchronizing for to not published.
   // A reader will zero this flag if it reads this/next version.
-  OrderAccess::release_store(&_invisible_epoch, thread);
+  Atomic::release_store(&_invisible_epoch, thread);
   GlobalCounter::write_synchronize();
 }
 
@@ -374,7 +374,7 @@
 ConcurrentHashTable<CONFIG, F>::
   get_table() const
 {
-  return OrderAccess::load_acquire(&_table);
+  return Atomic::load_acquire(&_table);
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -382,7 +382,7 @@
 ConcurrentHashTable<CONFIG, F>::
   get_new_table() const
 {
-  return OrderAccess::load_acquire(&_new_table);
+  return Atomic::load_acquire(&_new_table);
 }
 
 template <typename CONFIG, MEMFLAGS F>
@@ -392,7 +392,7 @@
 {
   InternalTable* old_table = _table;
   // Publish the new table.
-  OrderAccess::release_store(&_table, _new_table);
+  Atomic::release_store(&_table, _new_table);
   // All must see this.
   GlobalCounter::write_synchronize();
   // _new_table not read any more.
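
The hash-table hunks combine the two idioms of this changeset: pointers chased by lock-free readers (_next, _first, _table, _new_table) are read with acquire and published with release, and a bulk operation swaps in the new table with a release store followed by GlobalCounter::write_synchronize() so that no reader can still be traversing the old one. Schematically, as in the last hunk above:

    InternalTable* old_table = _table;
    Atomic::release_store(&_table, _new_table);  // readers now acquire the new table
    GlobalCounter::write_synchronize();          // wait out readers still inside a
                                                 // critical section on old_table
    // old_table is unreachable past this point and can be retired safely.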
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -74,7 +74,7 @@
 
   // Returns false if all ranges are claimed.
   bool have_more_work() {
-    return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
+    return Atomic::load_acquire(&_next_to_claim) >= _stop_task;
   }
 
   void thread_owns_resize_lock(Thread* thread) {
--- a/src/hotspot/share/utilities/globalCounter.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/globalCounter.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -41,7 +41,7 @@
     SpinYield yield;
     // Loops on this thread until it has exited the critical read section.
     while(true) {
-      uintx cnt = OrderAccess::load_acquire(thread->get_rcu_counter());
+      uintx cnt = Atomic::load_acquire(thread->get_rcu_counter());
       // This checks if the thread's counter is active, and if so, whether the
       // counter is for a pre-existing reader (belongs to this grace period). A pre-existing
       // reader will have a lower counter than the global counter version for this
--- a/src/hotspot/share/utilities/globalCounter.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/globalCounter.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -40,7 +40,7 @@
   if ((new_cnt & COUNTER_ACTIVE) == 0) {
     new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
   }
-  OrderAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
+  Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
   return static_cast<CSContext>(old_cnt);
 }
 
@@ -49,8 +49,8 @@
   assert(thread == Thread::current(), "must be current thread");
   assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
   // Restore the counter value from before the associated begin.
-  OrderAccess::release_store(thread->get_rcu_counter(),
-                             static_cast<uintx>(context));
+  Atomic::release_store(thread->get_rcu_counter(),
+                        static_cast<uintx>(context));
 }
 
 class GlobalCounter::CriticalSection {
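
GlobalCounter shows why release_store_fence exists: entering a critical section must publish the reader's counter and order it before any subsequent protected loads, while exiting only needs plain release semantics. A condensed reader-side sketch (COUNTER_ACTIVE and get_rcu_counter() as in the hunks; the saved-context plumbing is simplified):

    // Enter: advertise this thread as a reader of the current global version.
    uintx saved_cnt = Atomic::load(thread->get_rcu_counter());
    uintx new_cnt   = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
    Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
    // ... loads of RCU-protected data happen strictly after the fence ...

    // Exit: restore the saved value; the release orders the protected loads
    // before the writer can observe this reader as finished.
    Atomic::release_store(thread->get_rcu_counter(), saved_cnt);

write_synchronize() (globalCounter.cpp above) then acquire-loads every thread's counter and spins until no pre-existing reader remains in the grace period.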
--- a/src/hotspot/share/utilities/hashtable.inline.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/hashtable.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -88,7 +88,7 @@
   //          SystemDictionary are read without locks.  The new entry must be
   //          complete before other threads can be allowed to see it
   //          via a store to _buckets[index].
-  OrderAccess::release_store(&_entry, l);
+  Atomic::release_store(&_entry, l);
 }
 
 
@@ -97,7 +97,7 @@
   //          SystemDictionary are read without locks.  The new entry must be
   //          complete before other threads can be allowed to see it
   //          via a store to _buckets[index].
-  return OrderAccess::load_acquire(&_entry);
+  return Atomic::load_acquire(&_entry);
 }
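
For readers more familiar with standard C++, the bucket publish above corresponds to this standalone C++11 sketch (not HotSpot code; the entry type name is illustrative):

    #include <atomic>

    struct BasicHashtableEntry;   // incomplete type is fine for a pointer

    std::atomic<BasicHashtableEntry*> _entry{nullptr};

    void set_entry(BasicHashtableEntry* l) {
      _entry.store(l, std::memory_order_release);    // publish complete entry
    }

    BasicHashtableEntry* entry() {
      return _entry.load(std::memory_order_acquire); // consume safely
    }
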
 
 
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -85,7 +85,7 @@
   // to complete, e.g. for the value of old_ptr to catch up with old.
   // Loop because there could be pending wakeups unrelated to this
   // synchronize request.
-  while (old != OrderAccess::load_acquire(old_ptr)) {
+  while (old != Atomic::load_acquire(old_ptr)) {
     _wakeup.wait();
   }
   // (5) Drain any pending wakeups. A critical section exit may have
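
The synchronizer's contract, sketched from the caller's point of view (the class and CriticalSection names are from its public API; the bodies are illustrative):

    SingleWriterSynchronizer _synchronizer;

    void reader() {
      SingleWriterSynchronizer::CriticalSection cs(&_synchronizer);
      // ... read state that the writer may be replacing ...
    }

    void writer() {  // at most one thread at a time
      // ... unlink or replace the shared state ...
      _synchronizer.synchronize();  // wait out pre-existing readers
      // ... old state is now unreachable and safe to reclaim ...
    }
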
--- a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -27,7 +27,6 @@
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/semaphore.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
@@ -108,7 +107,7 @@
   {}
 
   virtual void main_run() {
-    while (OrderAccess::load_acquire(_continue_running)) {
+    while (Atomic::load_acquire(_continue_running)) {
       uint id = _set->claim_par_id();
       _set->release_par_id(id);
       ++_allocations;
@@ -147,7 +146,7 @@
     ThreadInVMfromNative invm(this_thread);
     this_thread->sleep(milliseconds_to_run);
   }
-  OrderAccess::release_store(&continue_running, false);
+  Atomic::release_store(&continue_running, false);
   for (uint i = 0; i < nthreads; ++i) {
     ThreadInVMfromNative invm(this_thread);
     post.wait_with_safepoint_check(this_thread);
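
This test's shutdown uses the stop-flag idiom that recurs in the gtests below, sketched here once with illustrative names. The acquire load keeps each iteration's reads ordered after the check (and prevents hoisting it out of the loop), and the release store makes whatever the controller did before stopping visible to the workers:

    static volatile bool _continue_running = true;

    void worker_loop() {
      while (Atomic::load_acquire(&_continue_running)) {
        // ... one unit of test work ...
      }
    }

    void stop_workers() {
      Atomic::release_store(&_continue_running, false);
    }
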
--- a/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -26,7 +26,7 @@
 #include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/semaphore.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/globalCounter.inline.hpp"
@@ -150,7 +150,7 @@
   {}
 
   virtual void main_run() {
-    while (OrderAccess::load_acquire(_continue_running)) {
+    while (Atomic::load_acquire(_continue_running)) {
       BufferNode* node = _allocator->allocate();
       _cbl->push(node);
       ++_allocations;
@@ -184,7 +184,7 @@
       BufferNode* node = _cbl->pop();
       if (node != NULL) {
         _allocator->release(node);
-      } else if (!OrderAccess::load_acquire(_continue_running)) {
+      } else if (!Atomic::load_acquire(_continue_running)) {
         return;
       }
       ThreadBlockInVM tbiv(this); // Safepoint check.
@@ -226,12 +226,12 @@
     ThreadInVMfromNative invm(this_thread);
     this_thread->sleep(milliseconds_to_run);
   }
-  OrderAccess::release_store(&allocator_running, false);
+  Atomic::release_store(&allocator_running, false);
   for (uint i = 0; i < nthreads; ++i) {
     ThreadInVMfromNative invm(this_thread);
     post.wait_with_safepoint_check(this_thread);
   }
-  OrderAccess::release_store(&processor_running, false);
+  Atomic::release_store(&processor_running, false);
   for (uint i = 0; i < nthreads; ++i) {
     ThreadInVMfromNative invm(this_thread);
     post.wait_with_safepoint_check(this_thread);
--- a/test/hotspot/gtest/utilities/test_globalCounter.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/utilities/test_globalCounter.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -23,7 +23,6 @@
 
 #include "precompiled.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalCounter.hpp"
 #include "utilities/globalCounter.inline.hpp"
@@ -48,14 +47,14 @@
     _wrt_start->signal();
     while (!_exit) {
       GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(this);
-      volatile TestData* test = OrderAccess::load_acquire(_test);
-      long value = OrderAccess::load_acquire(&test->test_value);
+      volatile TestData* test = Atomic::load_acquire(_test);
+      long value = Atomic::load_acquire(&test->test_value);
       ASSERT_EQ(value, GOOD_VALUE);
       GlobalCounter::critical_section_end(this, cs_context);
       {
         GlobalCounter::CriticalSection cs(this);
-        volatile TestData* test = OrderAccess::load_acquire(_test);
-        long value = OrderAccess::load_acquire(&test->test_value);
+        volatile TestData* test = Atomic::load_acquire(_test);
+        long value = Atomic::load_acquire(&test->test_value);
         ASSERT_EQ(value, GOOD_VALUE);
       }
     }
@@ -82,7 +81,7 @@
 
     TestData* tmp = new TestData();
     tmp->test_value = GOOD_VALUE;
-    OrderAccess::release_store_fence(&test, tmp);
+    Atomic::release_store_fence(&test, tmp);
 
     reader1->doit();
     reader2->doit();
@@ -99,7 +98,7 @@
       volatile TestData* free_tmp = test;
       tmp = new TestData();
       tmp->test_value = GOOD_VALUE;
-      OrderAccess::release_store(&test, tmp);
+      Atomic::release_store(&test, tmp);
       GlobalCounter::write_synchronize();
       free_tmp->test_value = BAD_VALUE;
       delete free_tmp;
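
The loop above is the full RCU reclamation protocol in miniature. As a sketch with illustrative names: swap in the replacement with a release store, wait out a grace period, and the old object is then provably unreachable:

    static TestData* volatile _current;

    void swap_and_retire(TestData* replacement) {
      TestData* old = _current;               // single writer owns _current
      Atomic::release_store(&_current, replacement);
      GlobalCounter::write_synchronize();     // all pre-existing readers gone
      delete old;                             // no reader can still hold it
    }
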
--- a/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -24,7 +24,6 @@
 #include "precompiled.hpp"
 #include "metaprogramming/isRegisteredEnum.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalCounter.hpp"
 #include "utilities/globalCounter.inline.hpp"
@@ -57,21 +56,21 @@
   ~RCUNestedThread() {}
 
   void set_state(NestedTestState new_state) {
-    OrderAccess::release_store(&_state, new_state);
+    Atomic::release_store(&_state, new_state);
   }
 
   void wait_with_state(NestedTestState new_state) {
     SpinYield spinner;
-    OrderAccess::release_store(&_state, new_state);
-    while (!OrderAccess::load_acquire(&_proceed)) {
+    Atomic::release_store(&_state, new_state);
+    while (!Atomic::load_acquire(&_proceed)) {
       spinner.wait();
     }
-    OrderAccess::release_store(&_proceed, false);
+    Atomic::release_store(&_proceed, false);
   }
 
 public:
   NestedTestState state() const {
-    return OrderAccess::load_acquire(&_state);
+    return Atomic::load_acquire(&_state);
   }
 
   void wait_for_state(NestedTestState goal) {
@@ -82,7 +81,7 @@
   }
 
   void proceed() {
-    OrderAccess::release_store(&_proceed, true);
+    Atomic::release_store(&_proceed, true);
   }
 };
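
The isRegisteredEnum.hpp include retained above is what lets the enum-typed _state go through Atomic at all: HotSpot requires an explicit opt-in specialization per enum, roughly like this (a sketch; the test's actual enumerators may differ):

    #include "metaprogramming/isRegisteredEnum.hpp"

    enum NestedTestState { BEFORE, DURING, AFTER };  // illustrative values
    template<> struct IsRegisteredEnum<NestedTestState> : public TrueType {};
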
 
--- a/test/hotspot/gtest/utilities/test_lockFreeStack.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/utilities/test_lockFreeStack.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -24,7 +24,6 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/lockFreeStack.hpp"
 #include "threadHelper.inline.hpp"
@@ -226,21 +225,21 @@
   {}
 
   virtual void main_run() {
-    OrderAccess::release_store_fence(&_ready, true);
+    Atomic::release_store_fence(&_ready, true);
     while (true) {
       Element* e = _from->pop();
       if (e != NULL) {
         _to->push(*e);
         Atomic::inc(_processed);
         ++_local_processed;
-      } else if (OrderAccess::load_acquire(_processed) == _process_limit) {
+      } else if (Atomic::load_acquire(_processed) == _process_limit) {
         tty->print_cr("thread %u processed " SIZE_FORMAT, _id, _local_processed);
         return;
       }
     }
   }
 
-  bool ready() const { return OrderAccess::load_acquire(&_ready); }
+  bool ready() const { return Atomic::load_acquire(&_ready); }
 };
 
 TEST_VM(LockFreeStackTest, stress) {
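
Taken together, the three operations this patch moves map onto standard C++ roughly as follows (a loose sketch; HotSpot's release_store_fence is a release store followed by a full fence, which is stronger than release alone):

    // Atomic::load_acquire(p)           ~  p->load(std::memory_order_acquire)
    // Atomic::release_store(p, v)       ~  p->store(v, std::memory_order_release)
    // Atomic::release_store_fence(p, v) ~  p->store(v, std::memory_order_release);
    //                                      std::atomic_thread_fence(std::memory_order_seq_cst);
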
--- a/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
@@ -56,14 +56,14 @@
   virtual void main_run() {
     size_t iterations = 0;
     size_t values_changed = 0;
-    while (OrderAccess::load_acquire(_continue_running) != 0) {
+    while (Atomic::load_acquire(_continue_running) != 0) {
       { ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section.
       ++iterations;
       SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
-      uintx value = OrderAccess::load_acquire(_synchronized_value);
+      uintx value = Atomic::load_acquire(_synchronized_value);
       uintx new_value = value;
       for (uint i = 0; i < reader_iterations; ++i) {
-        new_value = OrderAccess::load_acquire(_synchronized_value);
+        new_value = Atomic::load_acquire(_synchronized_value);
         // A reader can see either the value it first read after
         // entering the critical section, or that value + 1.  No other
         // values are possible.
@@ -97,7 +97,7 @@
   {}
 
   virtual void main_run() {
-    while (OrderAccess::load_acquire(_continue_running) != 0) {
+    while (Atomic::load_acquire(_continue_running) != 0) {
       ++*_synchronized_value;
       _synchronizer->synchronize();
       { ThreadBlockInVM tbiv(this); } // Safepoint check.
--- a/test/hotspot/gtest/utilities/test_waitBarrier.cpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/test/hotspot/gtest/utilities/test_waitBarrier.cpp	Mon Nov 25 12:22:13 2019 +0100
@@ -49,9 +49,9 @@
     // Similar to how a JavaThread would stop in a safepoint.
     while (!_exit) {
       // Load the published tag.
-      tag = OrderAccess::load_acquire(&wait_tag);
+      tag = Atomic::load_acquire(&wait_tag);
       // Publish the tag this thread is going to wait for.
-      OrderAccess::release_store(&_on_barrier, tag);
+      Atomic::release_store(&_on_barrier, tag);
       if (_on_barrier == 0) {
         SpinPause();
         continue;
@@ -60,9 +60,9 @@
       // Wait until we are woken.
       _wait_barrier->wait(tag);
       // Verify that we do not see an invalid value.
-      vv = OrderAccess::load_acquire(&valid_value);
+      vv = Atomic::load_acquire(&valid_value);
       ASSERT_EQ((vv & 0x1), 0);
-      OrderAccess::release_store(&_on_barrier, 0);
+      Atomic::release_store(&_on_barrier, 0);
     }
   }
 };
@@ -104,7 +104,7 @@
       // Arm next tag.
       wb.arm(next_tag);
       // Publish tag.
-      OrderAccess::release_store_fence(&wait_tag, next_tag);
+      Atomic::release_store_fence(&wait_tag, next_tag);
 
       // Wait until threads picked up new tag.
       while (reader1->_on_barrier != wait_tag ||
@@ -115,12 +115,12 @@
       }
 
       // Set an invalid value.
-      OrderAccess::release_store(&valid_value, valid_value + 1); // odd
+      Atomic::release_store(&valid_value, valid_value + 1); // odd
       os::naked_yield();
       // Set a valid value.
-      OrderAccess::release_store(&valid_value, valid_value + 1); // even
+      Atomic::release_store(&valid_value, valid_value + 1); // even
       // Publish inactive tag.
-      OrderAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
+      Atomic::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
       wb.disarm();
 
       // Wait until threads done valid_value verification.
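
The writer/waiter protocol this test drives, flattened into one sketch (setup elided; names follow the test, bodies are illustrative):

    WaitBarrier wb(/* elided */);
    static volatile int wait_tag = 0;
    int next_tag = 1;            // writer's monotonically increasing tag

    // Writer: arm, publish the tag with a trailing fence, mutate the
    // guarded state, publish the inactive tag, then disarm to release
    // the waiters.
    wb.arm(next_tag);
    Atomic::release_store_fence(&wait_tag, next_tag);
    // ... updates that waiters must not observe mid-flight ...
    Atomic::release_store_fence(&wait_tag, 0);  // stores above must not float up
    wb.disarm();

    // Waiter: pick up the published tag and block until disarm.
    int tag = Atomic::load_acquire(&wait_tag);
    if (tag != 0) {
      wb.wait(tag);
    }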