# HG changeset patch # User stefank # Date 1574680933 -3600 # Node ID 56bf71d64d51f93486279b70a3dbc74a5c22bde9 # Parent fcad92f425c54666e83a3c339b580c7f55f1b93f 8234562: Move OrderAccess::release_store*/load_acquire to Atomic Reviewed-by: rehn, dholmes diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/cpu/ppc/nativeInst_ppc.cpp --- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -374,7 +374,7 @@ // Finally patch out the jump. volatile juint *jump_addr = (volatile juint*)instr_addr; // Release not needed because caller uses invalidate_range after copying the remaining bytes. - //OrderAccess::release_store(jump_addr, *((juint*)code_buffer)); + //Atomic::release_store(jump_addr, *((juint*)code_buffer)); *jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os/bsd/os_bsd.cpp --- a/src/hotspot/os/bsd/os_bsd.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os/bsd/os_bsd.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -51,7 +51,6 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/semaphore.hpp" @@ -3209,7 +3208,7 @@ static volatile int next_processor_id = 0; static inline volatile int* get_apic_to_processor_mapping() { - volatile int* mapping = OrderAccess::load_acquire(&apic_to_processor_mapping); + volatile int* mapping = Atomic::load_acquire(&apic_to_processor_mapping); if (mapping == NULL) { // Calculate possible number space for APIC ids. This space is not necessarily // in the range [0, number_of_processors). @@ -3240,7 +3239,7 @@ if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) { FREE_C_HEAP_ARRAY(int, mapping); - mapping = OrderAccess::load_acquire(&apic_to_processor_mapping); + mapping = Atomic::load_acquire(&apic_to_processor_mapping); } } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os/windows/os_windows.cpp --- a/src/hotspot/os/windows/os_windows.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os/windows/os_windows.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -3747,7 +3747,7 @@ // The first thread that reached this point, initializes the critical section. if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); - } else if (OrderAccess::load_acquire(&process_exiting) == 0) { + } else if (Atomic::load_acquire(&process_exiting) == 0) { if (what != EPT_THREAD) { // Atomically set process_exiting before the critical section // to increase the visibility between racing threads. @@ -3755,7 +3755,7 @@ } EnterCriticalSection(&crit_sect); - if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { + if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) { // Remove from the array those handles of the threads that have completed exiting. 
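For reference, the renaming in this patch is mechanical: OrderAccess::load_acquire(p) becomes Atomic::load_acquire(p), OrderAccess::release_store(p, v) becomes Atomic::release_store(p, v), and release_store_fence follows the same pattern. The call sites keep their existing publish/observe discipline; for example, the apic_to_processor_mapping table in os_bsd.cpp is fully built before it is made visible. Below is a minimal sketch of that discipline written with standard C++ atomics rather than HotSpot's Atomic class; Table, g_table, publish_table and lookup_table are illustrative names only.

  #include <atomic>
  #include <vector>

  struct Table {                                   // stand-in for the lazily built mapping
    std::vector<int> mapping;
  };

  std::atomic<Table*> g_table{nullptr};

  // Writer: finish initialization, then publish with release semantics
  // (the role Atomic::release_store / Atomic::replace_if_null plays above).
  void publish_table() {
    Table* t = new Table();
    t->mapping.assign(256, -1);                    // initialization is sequenced...
    g_table.store(t, std::memory_order_release);   // ...before the pointer is published
  }

  // Reader: acquire load (the role of Atomic::load_acquire); a non-null
  // result guarantees the initialization above is also visible.
  const Table* lookup_table() {
    return g_table.load(std::memory_order_acquire);
  }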
for (i = 0, j = 0; i < handle_count; ++i) { res = WaitForSingleObject(handles[i], 0 /* don't wait */); @@ -3868,7 +3868,7 @@ } if (!registered && - OrderAccess::load_acquire(&process_exiting) != 0 && + Atomic::load_acquire(&process_exiting) != 0 && process_exiting != GetCurrentThreadId()) { // Some other thread is about to call exit(), so we don't let // the current unregistered thread proceed to exit() or _endthreadex() diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp --- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -30,6 +30,7 @@ #error "Atomic currently only implemented for PPC64" #endif +#include "orderAccess_aix_ppc.hpp" #include "utilities/debug.hpp" // Implementation of class atomic @@ -399,4 +400,15 @@ return old_value; } +template +struct Atomic::PlatformOrderedLoad { + template + T operator()(const volatile T* p) const { + T t = Atomic::load(p); + // Use twi-isync for load_acquire (faster than lwsync). + __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory"); + return t; + } +}; + #endif // OS_CPU_AIX_PPC_ATOMIC_AIX_PPC_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp --- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -64,8 +64,6 @@ #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); #define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); #define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -// Use twi-isync for load_acquire (faster than lwsync). -#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); inline void OrderAccess::loadload() { inlasm_lwsync(); } inline void OrderAccess::storestore() { inlasm_lwsync(); } @@ -78,13 +76,6 @@ inline void OrderAccess::cross_modify_fence() { inlasm_isync(); } -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } -}; - #undef inlasm_sync #undef inlasm_lwsync #undef inlasm_eieio diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp --- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -169,4 +169,54 @@ #endif // AMD64 +template<> +struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgb (%2),%0" + : "=q" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgw (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgl (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +#ifdef AMD64 +template<> +struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgq (%2), %0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; +#endif // AMD64 + #endif // 
OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp --- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -64,54 +64,4 @@ __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory"); } -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgw (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -#ifdef AMD64 -template<> -struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; -#endif // AMD64 - #endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp --- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -32,10 +32,6 @@ // Note that memory_order_conservative requires a full barrier after atomic stores. 
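The PlatformOrderedStore<size, RELEASE_X_FENCE> specializations moved into the x86 atomic headers rely on the fact that an x86 xchg with a memory operand is implicitly locked: the single instruction both performs the store and acts as a full fence, so release_store_fence needs no separate mfence. A standalone version of the 4-byte case, assuming x86 and GCC/Clang inline assembly; the function name is made up.

  #include <cstdint>

  // Store v into *p and act as a full memory barrier, using the implicitly
  // locked xchg, the same idiom as the xchgl specialization in this patch.
  inline void release_store_fence_u32(volatile uint32_t* p, uint32_t v) {
    __asm__ volatile ("xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }

A plain mov followed by mfence would also be correct; the xchg form does the same job in one instruction, and the move from OrderAccess to Atomic leaves the instruction sequence unchanged.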
// See https://patchwork.kernel.org/patch/3575821/ -#define FULL_MEM_BARRIER __sync_synchronize() -#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); -#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); - template struct Atomic::PlatformAdd : Atomic::AddAndFetch > @@ -81,4 +77,25 @@ } } +template +struct Atomic::PlatformOrderedLoad +{ + template + T operator()(const volatile T* p) const { T data; __atomic_load(const_cast(p), &data, __ATOMIC_ACQUIRE); return data; } +}; + +template +struct Atomic::PlatformOrderedStore +{ + template + void operator()(T v, volatile T* p) const { __atomic_store(const_cast(p), &v, __ATOMIC_RELEASE); } +}; + +template +struct Atomic::PlatformOrderedStore +{ + template + void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); } +}; + #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp --- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -37,6 +37,10 @@ inline void OrderAccess::loadstore() { acquire(); } inline void OrderAccess::storeload() { fence(); } +#define FULL_MEM_BARRIER __sync_synchronize() +#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); +#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); + inline void OrderAccess::acquire() { READ_MEM_BARRIER; } @@ -51,25 +55,4 @@ inline void OrderAccess::cross_modify_fence() { } -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { T data; __atomic_load(const_cast(p), &data, __ATOMIC_ACQUIRE); return data; } -}; - -template -struct OrderAccess::PlatformOrderedStore -{ - template - void operator()(T v, volatile T* p) const { __atomic_store(const_cast(p), &v, __ATOMIC_RELEASE); } -}; - -template -struct OrderAccess::PlatformOrderedStore -{ - template - void operator()(T v, volatile T* p) const { release_store(p, v); fence(); } -}; - #endif // OS_CPU_LINUX_AARCH64_ORDERACCESS_LINUX_AARCH64_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp --- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -30,6 +30,7 @@ #error "Atomic currently only implemented for PPC64" #endif +#include "orderAccess_linux_ppc.hpp" #include "utilities/debug.hpp" // Implementation of class atomic @@ -399,4 +400,16 @@ return old_value; } +template +struct Atomic::PlatformOrderedLoad +{ + template + T operator()(const volatile T* p) const { + T t = Atomic::load(p); + // Use twi-isync for load_acquire (faster than lwsync). 
+ __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory"); + return t; + } +}; + #endif // OS_CPU_LINUX_PPC_ATOMIC_LINUX_PPC_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp --- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -68,8 +68,6 @@ #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); #define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); #define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -// Use twi-isync for load_acquire (faster than lwsync). -#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); inline void OrderAccess::loadload() { inlasm_lwsync(); } inline void OrderAccess::storestore() { inlasm_lwsync(); } @@ -82,17 +80,9 @@ inline void OrderAccess::cross_modify_fence() { inlasm_isync(); } -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } -}; - #undef inlasm_sync #undef inlasm_lwsync #undef inlasm_eieio #undef inlasm_isync -#undef inlasm_acquire_reg #endif // OS_CPU_LINUX_PPC_ORDERACCESS_LINUX_PPC_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp --- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -335,4 +335,11 @@ return old; } +template +struct Atomic::PlatformOrderedLoad +{ + template + T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; } +}; + #endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp --- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -76,13 +76,6 @@ inline void OrderAccess::fence() { inlasm_zarch_sync(); } inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); } -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { T t = *p; inlasm_zarch_acquire(); return t; } -}; - #undef inlasm_compiler_barrier #undef inlasm_zarch_sync #undef inlasm_zarch_release diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp --- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -169,4 +169,54 @@ #endif // AMD64 +template<> +struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgb (%2),%0" + : "=q" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgw (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgl (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +#ifdef AMD64 +template<> +struct 
Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgq (%2), %0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; +#endif // AMD64 + #endif // OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp --- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -66,54 +66,4 @@ #endif } -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgw (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -#ifdef AMD64 -template<> -struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; -#endif // AMD64 - #endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp --- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -27,6 +27,17 @@ #include "runtime/os.hpp" +// Note that in MSVC, volatile memory accesses are explicitly +// guaranteed to have acquire release semantics (w.r.t. compiler +// reordering) and therefore does not even need a compiler barrier +// for normal acquire release accesses. And all generalized +// bound calls like release_store go through Atomic::load +// and Atomic::store which do volatile memory accesses. +template<> inline void ScopedFence::postfix() { } +template<> inline void ScopedFence::prefix() { } +template<> inline void ScopedFence::prefix() { } +template<> inline void ScopedFence::postfix() { OrderAccess::fence(); } + // The following alternative implementations are needed because // Windows 95 doesn't support (some of) the corresponding Windows NT // calls. Furthermore, these versions allow inlining in the caller. 
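The comment block moved into atomic_windows_x86.hpp describes how the generalized forms are composed: a release barrier prefix, the raw store, and, only for release_store_fence, a trailing full fence as the postfix. Under MSVC the plain acquire/release prefix and postfix collapse to nothing because volatile accesses are already compiler-ordered, which is what the empty ScopedFence specializations express. A simplified sketch of that composition using standard C++ fences, not the real ScopedFence templates:

  #include <atomic>

  // Simplified model only: prefix barrier, raw store, optional trailing fence.
  template <typename T>
  void release_store_sketch(std::atomic<T>* p, T v, bool trailing_full_fence) {
    std::atomic_thread_fence(std::memory_order_release);      // "prefix"
    p->store(v, std::memory_order_relaxed);                    // raw store
    if (trailing_full_fence) {                                 // "postfix", used only
      std::atomic_thread_fence(std::memory_order_seq_cst);     // by release_store_fence
    }
  }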
@@ -218,4 +229,45 @@ #pragma warning(default: 4035) // Enables warnings reporting missing return statement +#ifndef AMD64 +template<> +struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov al, v; + xchg al, byte ptr [edx]; + } + } +}; + +template<> +struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov ax, v; + xchg ax, word ptr [edx]; + } + } +}; + +template<> +struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov eax, v; + xchg eax, dword ptr [edx]; + } + } +}; +#endif // AMD64 + #endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp --- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -39,17 +39,6 @@ _ReadWriteBarrier(); } -// Note that in MSVC, volatile memory accesses are explicitly -// guaranteed to have acquire release semantics (w.r.t. compiler -// reordering) and therefore does not even need a compiler barrier -// for normal acquire release accesses. And all generalized -// bound calls like release_store go through OrderAccess::load -// and OrderAccess::store which do volatile memory accesses. -template<> inline void ScopedFence::postfix() { } -template<> inline void ScopedFence::prefix() { } -template<> inline void ScopedFence::prefix() { } -template<> inline void ScopedFence::postfix() { OrderAccess::fence(); } - inline void OrderAccess::loadload() { compiler_barrier(); } inline void OrderAccess::storestore() { compiler_barrier(); } inline void OrderAccess::loadstore() { compiler_barrier(); } @@ -74,45 +63,4 @@ __cpuid(regs, 0); } -#ifndef AMD64 -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm { - mov edx, p; - mov al, v; - xchg al, byte ptr [edx]; - } - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm { - mov edx, p; - mov ax, v; - xchg ax, word ptr [edx]; - } - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm { - mov edx, p; - mov eax, v; - xchg eax, dword ptr [edx]; - } - } -}; -#endif // AMD64 - #endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/classfile/classLoader.inline.hpp --- a/src/hotspot/share/classfile/classLoader.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/classfile/classLoader.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -29,11 +29,11 @@ #include "runtime/orderAccess.hpp" // Next entry in class path -inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); } +inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); } inline void ClassPathEntry::set_next(ClassPathEntry* next) { // may have unlocked readers, so ensure visibility. 
- OrderAccess::release_store(&_next, next); + Atomic::release_store(&_next, next); } inline ClassPathEntry* ClassLoader::classpath_entry(int n) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/classfile/classLoaderData.cpp --- a/src/hotspot/share/classfile/classLoaderData.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/classfile/classLoaderData.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -187,11 +187,11 @@ oop* ClassLoaderData::ChunkedHandleList::add(oop o) { if (_head == NULL || _head->_size == Chunk::CAPACITY) { Chunk* next = new Chunk(_head); - OrderAccess::release_store(&_head, next); + Atomic::release_store(&_head, next); } oop* handle = &_head->_data[_head->_size]; NativeAccess::oop_store(handle, o); - OrderAccess::release_store(&_head->_size, _head->_size + 1); + Atomic::release_store(&_head->_size, _head->_size + 1); return handle; } @@ -214,10 +214,10 @@ } void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) { - Chunk* head = OrderAccess::load_acquire(&_head); + Chunk* head = Atomic::load_acquire(&_head); if (head != NULL) { // Must be careful when reading size of head - oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size)); + oops_do_chunk(f, head, Atomic::load_acquire(&head->_size)); for (Chunk* c = head->_next; c != NULL; c = c->_next) { oops_do_chunk(f, c, c->_size); } @@ -326,7 +326,7 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { klass_closure->do_klass(k); assert(k != k->next_link(), "no loops!"); } @@ -334,7 +334,7 @@ void ClassLoaderData::classes_do(void f(Klass * const)) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { f(k); assert(k != k->next_link(), "no loops!"); } @@ -342,7 +342,7 @@ void ClassLoaderData::methods_do(void f(Method*)) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) { InstanceKlass::cast(k)->methods_do(f); } @@ -351,7 +351,7 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { // Do not filter ArrayKlass oops here... 
if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) { #ifdef ASSERT @@ -366,7 +366,7 @@ void ClassLoaderData::classes_do(void f(InstanceKlass*)) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { if (k->is_instance_klass()) { f(InstanceKlass::cast(k)); } @@ -465,7 +465,7 @@ k->set_next_link(old_value); // Link the new item into the list, making sure the linked class is stable // since the list can be walked without a lock - OrderAccess::release_store(&_klasses, k); + Atomic::release_store(&_klasses, k); if (k->is_array_klass()) { ClassLoaderDataGraph::inc_array_classes(1); } else { @@ -552,7 +552,7 @@ ModuleEntryTable* ClassLoaderData::modules() { // Lazily create the module entry table at first request. // Lock-free access requires load_acquire. - ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules); + ModuleEntryTable* modules = Atomic::load_acquire(&_modules); if (modules == NULL) { MutexLocker m1(Module_lock); // Check if _modules got allocated while we were waiting for this lock. @@ -562,7 +562,7 @@ { MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag); // Ensure _modules is stable, since it is examined without a lock - OrderAccess::release_store(&_modules, modules); + Atomic::release_store(&_modules, modules); } } } @@ -752,7 +752,7 @@ // The reason for the delayed allocation is because some class loaders are // simply for delegating with no metadata of their own. // Lock-free access requires load_acquire. - ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace); + ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace); if (metaspace == NULL) { MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag); // Check if _metaspace got allocated while we were waiting for this lock. @@ -768,7 +768,7 @@ metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType); } // Ensure _metaspace is stable, since it is examined without a lock - OrderAccess::release_store(&_metaspace, metaspace); + Atomic::release_store(&_metaspace, metaspace); } } return metaspace; @@ -969,7 +969,7 @@ bool ClassLoaderData::contains_klass(Klass* klass) { // Lock-free access requires load_acquire - for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) { + for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) { if (k == klass) return true; } return false; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/classfile/classLoaderDataGraph.cpp --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -59,13 +59,13 @@ // // Any ClassLoaderData added after or during walking the list are prepended to // _head. Their claim mark need not be handled here. 
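The _klasses list and the ClassLoaderDataGraph _head updated in these hunks follow the standard lock-free publication rule for singly linked lists: the writer links the new node first and only then release-stores the head, while readers acquire-load the head once and walk plain next pointers without a lock. A minimal sketch in standard C++; Node, push_front and sum_payloads are illustrative names, and writers are assumed to be serialized externally, as the HotSpot code arranges.

  #include <atomic>

  struct Node {
    int   payload;
    Node* next;
  };

  std::atomic<Node*> head{nullptr};

  // Writer: link the new node, then publish it with a release store so a
  // reader that sees the new head also sees payload and next
  // (Atomic::release_store(&_head, n) in HotSpot terms).
  void push_front(int value) {
    Node* n = new Node{value, head.load(std::memory_order_relaxed)};
    head.store(n, std::memory_order_release);
  }

  // Reader: acquire-load the head once, then walk plain next pointers,
  // matching the lock-free classes_do() loops above.
  int sum_payloads() {
    int s = 0;
    for (Node* n = head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
      s += n->payload;
    }
    return s;
  }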
- for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) { + for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) { cld->clear_claim(); } } void ClassLoaderDataGraph::clear_claimed_marks(int claim) { - for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) { + for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) { cld->clear_claim(claim); } } @@ -220,7 +220,7 @@ // First install the new CLD to the Graph. cld->set_next(_head); - OrderAccess::release_store(&_head, cld); + Atomic::release_store(&_head, cld); // Next associate with the class_loader. if (!is_unsafe_anonymous) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/code/compiledMethod.cpp --- a/src/hotspot/share/code/compiledMethod.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/code/compiledMethod.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -113,7 +113,7 @@ //----------------------------------------------------------------------------- ExceptionCache* CompiledMethod::exception_cache_acquire() const { - return OrderAccess::load_acquire(&_exception_cache); + return Atomic::load_acquire(&_exception_cache); } void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/code/compiledMethod.inline.hpp --- a/src/hotspot/share/code/compiledMethod.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/code/compiledMethod.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -61,7 +61,7 @@ // class ExceptionCache methods -inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); } +inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); } address ExceptionCache::pc_at(int index) { assert(index >= 0 && index < count(),""); @@ -74,7 +74,7 @@ } // increment_count is only called under lock, but there may be concurrent readers. -inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); } +inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); } #endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/code/dependencyContext.cpp --- a/src/hotspot/share/code/dependencyContext.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/code/dependencyContext.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -281,7 +281,7 @@ nmethodBucket* DependencyContext::dependencies_not_unloading() { for (;;) { // Need acquire becase the read value could come from a concurrent insert. 
- nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr); + nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr); if (head == NULL || !head->get_nmethod()->is_unloading()) { return head; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp --- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -158,13 +158,13 @@ } G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() { - return OrderAccess::load_acquire(&_table); + return Atomic::load_acquire(&_table); } void G1CodeRootSet::allocate_small_table() { G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize); - OrderAccess::release_store(&_table, temp); + Atomic::release_store(&_table, temp); } void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) { @@ -194,7 +194,7 @@ G1CodeRootSetTable::purge_list_append(_table); - OrderAccess::release_store(&_table, temp); + Atomic::release_store(&_table, temp); } void G1CodeRootSet::purge() { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/g1/heapRegionRemSet.cpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -219,7 +219,7 @@ // some mark bits may not yet seem cleared or a 'later' update // performed by a concurrent thread could be undone when the // zeroing becomes visible). This requires store ordering. - OrderAccess::release_store(&_fine_grain_regions[ind], prt); + Atomic::release_store(&_fine_grain_regions[ind], prt); _n_fine_entries++; // Transfer from sparse to fine-grain. diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/g1/heapRegionRemSet.hpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -190,7 +190,7 @@ // We need access in order to union things into the base table. BitMap* bm() { return &_bm; } - HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); } + HeapRegion* hr() const { return Atomic::load_acquire(&_hr); } jint occupied() const { // Overkill, but if we ever need it... diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -65,7 +65,7 @@ _bm.clear(); // Make sure that the bitmap clearing above has been finished before publishing // this PRT to concurrent threads. 
- OrderAccess::release_store(&_hr, hr); + Atomic::release_store(&_hr, hr); } template diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -27,14 +27,14 @@ #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/cardTable.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/atomic.hpp" template inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) { volatile CardValue* byte = _card_table->byte_for(field); if (_card_table->scanned_concurrently()) { // Perform a releasing store if the card table is scanned concurrently - OrderAccess::release_store(byte, CardTable::dirty_card_val()); + Atomic::release_store(byte, CardTable::dirty_card_val()); } else { *byte = CardTable::dirty_card_val(); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shared/concurrentGCThread.cpp --- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -51,7 +51,7 @@ // Signal thread has terminated MonitorLocker ml(Terminator_lock); - OrderAccess::release_store(&_has_terminated, true); + Atomic::release_store(&_has_terminated, true); ml.notify_all(); } @@ -60,7 +60,7 @@ assert(!has_terminated(), "Invalid state"); // Signal thread to terminate - OrderAccess::release_store_fence(&_should_terminate, true); + Atomic::release_store_fence(&_should_terminate, true); stop_service(); @@ -72,9 +72,9 @@ } bool ConcurrentGCThread::should_terminate() const { - return OrderAccess::load_acquire(&_should_terminate); + return Atomic::load_acquire(&_should_terminate); } bool ConcurrentGCThread::has_terminated() const { - return OrderAccess::load_acquire(&_has_terminated); + return Atomic::load_acquire(&_has_terminated); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shared/oopStorage.cpp --- a/src/hotspot/share/gc/shared/oopStorage.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shared/oopStorage.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -140,7 +140,7 @@ } size_t OopStorage::ActiveArray::block_count_acquire() const { - return OrderAccess::load_acquire(&_block_count); + return Atomic::load_acquire(&_block_count); } void OopStorage::ActiveArray::increment_refcount() const { @@ -161,7 +161,7 @@ *block_ptr(index) = block; // Use a release_store to ensure all the setup is complete before // making the block visible. - OrderAccess::release_store(&_block_count, index + 1); + Atomic::release_store(&_block_count, index + 1); return true; } else { return false; @@ -264,8 +264,8 @@ bool OopStorage::Block::is_safe_to_delete() const { assert(is_empty(), "precondition"); OrderAccess::loadload(); - return (OrderAccess::load_acquire(&_release_refcount) == 0) && - (OrderAccess::load_acquire(&_deferred_updates_next) == NULL); + return (Atomic::load_acquire(&_release_refcount) == 0) && + (Atomic::load_acquire(&_deferred_updates_next) == NULL); } OopStorage::Block* OopStorage::Block::deferred_updates_next() const { @@ -514,7 +514,7 @@ // Update new_array refcount to account for the new reference. new_array->increment_refcount(); // Install new_array, ensuring its initialization is complete first. 
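The OopStorage changes around this point show the companion idiom for arrays: write the new element into its slot, then release-store the element count (or the new array pointer), so a reader that acquire-loads the count sees every element below it. A minimal sketch with a made-up fixed capacity and element type, assuming a single writer; the real code serializes pushes through the allocation path.

  #include <atomic>
  #include <cstddef>

  struct PublishArray {
    void*               slots[128];
    std::atomic<size_t> count{0};

    // Writer: fill the slot, then make it visible by bumping the count with
    // release semantics (Atomic::release_store(&_block_count, index + 1)).
    bool push(void* block) {
      size_t index = count.load(std::memory_order_relaxed);
      if (index >= 128) return false;
      slots[index] = block;
      count.store(index + 1, std::memory_order_release);
      return true;
    }

    // Readers acquire-load the count and may then read slots[0 .. n).
    size_t snapshot_count() const {
      return count.load(std::memory_order_acquire);
    }
  };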
- OrderAccess::release_store(&_active_array, new_array); + Atomic::release_store(&_active_array, new_array); // Wait for any readers that could read the old array from _active_array. // Can't use GlobalCounter here, because this is called from allocate(), // which may be called in the scope of a GlobalCounter critical section @@ -532,7 +532,7 @@ // using it. OopStorage::ActiveArray* OopStorage::obtain_active_array() const { SingleWriterSynchronizer::CriticalSection cs(&_protect_active); - ActiveArray* result = OrderAccess::load_acquire(&_active_array); + ActiveArray* result = Atomic::load_acquire(&_active_array); result->increment_refcount(); return result; } @@ -645,7 +645,7 @@ // Atomically pop a block off the list, if any available. // No ABA issue because this is only called by one thread at a time. // The atomicity is wrto pushes by release(). - Block* block = OrderAccess::load_acquire(&_deferred_updates); + Block* block = Atomic::load_acquire(&_deferred_updates); while (true) { if (block == NULL) return false; // Try atomic pop of block from list. @@ -833,23 +833,23 @@ void OopStorage::record_needs_cleanup() { // Set local flag first, else service thread could wake up and miss // the request. This order may instead (rarely) unnecessarily notify. - OrderAccess::release_store(&_needs_cleanup, true); - OrderAccess::release_store_fence(&needs_cleanup_requested, true); + Atomic::release_store(&_needs_cleanup, true); + Atomic::release_store_fence(&needs_cleanup_requested, true); } bool OopStorage::delete_empty_blocks() { // Service thread might have oopstorage work, but not for this object. // Check for deferred updates even though that's not a service thread // trigger; since we're here, we might as well process them. - if (!OrderAccess::load_acquire(&_needs_cleanup) && - (OrderAccess::load_acquire(&_deferred_updates) == NULL)) { + if (!Atomic::load_acquire(&_needs_cleanup) && + (Atomic::load_acquire(&_deferred_updates) == NULL)) { return false; } MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag); // Clear the request before processing. - OrderAccess::release_store_fence(&_needs_cleanup, false); + Atomic::release_store_fence(&_needs_cleanup, false); // Other threads could be adding to the empty block count or the // deferred update list while we're working. Set an upper bound on @@ -993,7 +993,7 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) { data->_processed += data->_segment_end - data->_segment_start; - size_t start = OrderAccess::load_acquire(&_next_block); + size_t start = Atomic::load_acquire(&_next_block); if (start >= _block_count) { return finish_iteration(data); // No more blocks available. } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shared/ptrQueue.cpp --- a/src/hotspot/share/gc/shared/ptrQueue.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -224,7 +224,7 @@ log_trace(gc, ptrqueue, freelist) ("Transferred %s pending to free: " SIZE_FORMAT, name(), count); } - OrderAccess::release_store(&_transfer_lock, false); + Atomic::release_store(&_transfer_lock, false); return true; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shared/taskqueue.inline.hpp --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -68,7 +68,7 @@ // assignment. However, casting to E& means that we trigger an // unused-value warning. 
So, we cast the E& to void. (void)const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); + Atomic::release_store(&_bottom, increment_index(localBot)); TASKQUEUE_STATS_ONLY(stats.record_push()); return true; } @@ -89,7 +89,7 @@ // assignment. However, casting to E& means that we trigger an // unused-value warning. So, we cast the E& to void. (void) const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); + Atomic::release_store(&_bottom, increment_index(localBot)); TASKQUEUE_STATS_ONLY(stats.record_push()); return true; } else { @@ -210,7 +210,7 @@ #ifndef CPU_MULTI_COPY_ATOMIC OrderAccess::fence(); #endif - uint localBot = OrderAccess::load_acquire(&_bottom); + uint localBot = Atomic::load_acquire(&_bottom); uint n_elems = size(localBot, oldAge.top()); if (n_elems == 0) { return false; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -38,7 +38,7 @@ } void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() { - while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) { + while ((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) { os::naked_short_sleep(1); } // At this point we are sure that no threads can evacuate anything. Raise @@ -48,7 +48,7 @@ } void ShenandoahEvacOOMHandler::enter_evacuation() { - jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac); + jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac); assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity"); assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); @@ -79,7 +79,7 @@ void ShenandoahEvacOOMHandler::leave_evacuation() { if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) { - assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity"); + assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity"); // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive. 
Atomic::dec(&_threads_in_evac); } else { @@ -96,7 +96,7 @@ assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity"); assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); - jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac); + jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac); while (true) { jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK, &_threads_in_evac, threads_in_evac); @@ -113,8 +113,8 @@ void ShenandoahEvacOOMHandler::clear() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); - assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity"); - OrderAccess::release_store_fence(&_threads_in_evac, 0); + assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity"); + Atomic::release_store_fence(&_threads_in_evac, 0); } ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -601,7 +601,7 @@ } size_t ShenandoahHeap::used() const { - return OrderAccess::load_acquire(&_used); + return Atomic::load_acquire(&_used); } size_t ShenandoahHeap::committed() const { @@ -624,7 +624,7 @@ } void ShenandoahHeap::set_used(size_t bytes) { - OrderAccess::release_store_fence(&_used, bytes); + Atomic::release_store_fence(&_used, bytes); } void ShenandoahHeap::decrease_used(size_t bytes) { @@ -2114,11 +2114,11 @@ } size_t ShenandoahHeap::bytes_allocated_since_gc_start() { - return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start); + return Atomic::load_acquire(&_bytes_allocated_since_gc_start); } void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { - OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); + Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); } void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -305,7 +305,7 @@ } void ShenandoahHeapRegion::clear_live_data() { - OrderAccess::release_store_fence(&_live_data, 0); + Atomic::release_store_fence(&_live_data, 0); } void ShenandoahHeapRegion::reset_alloc_metadata() { @@ -351,7 +351,7 @@ } size_t ShenandoahHeapRegion::get_live_data_words() const { - return OrderAccess::load_acquire(&_live_data); + return Atomic::load_acquire(&_live_data); } size_t ShenandoahHeapRegion::get_live_data_bytes() const { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -47,19 +47,19 @@ } void set() { - OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET); + Atomic::release_store_fence(&value, (ShenandoahSharedValue)SET); } void unset() { - OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET); + Atomic::release_store_fence(&value, (ShenandoahSharedValue)UNSET); 
} bool is_set() const { - return OrderAccess::load_acquire(&value) == SET; + return Atomic::load_acquire(&value) == SET; } bool is_unset() const { - return OrderAccess::load_acquire(&value) == UNSET; + return Atomic::load_acquire(&value) == UNSET; } void set_cond(bool val) { @@ -118,7 +118,7 @@ assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; while (true) { - ShenandoahSharedValue ov = OrderAccess::load_acquire(&value); + ShenandoahSharedValue ov = Atomic::load_acquire(&value); if ((ov & mask_val) != 0) { // already set return; @@ -136,7 +136,7 @@ assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; while (true) { - ShenandoahSharedValue ov = OrderAccess::load_acquire(&value); + ShenandoahSharedValue ov = Atomic::load_acquire(&value); if ((ov & mask_val) == 0) { // already unset return; @@ -151,7 +151,7 @@ } void clear() { - OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0); + Atomic::release_store_fence(&value, (ShenandoahSharedValue)0); } bool is_set(uint mask) const { @@ -160,11 +160,11 @@ bool is_unset(uint mask) const { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); - return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0; + return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0; } bool is_clear() const { - return (OrderAccess::load_acquire(&value)) == 0; + return (Atomic::load_acquire(&value)) == 0; } void set_cond(uint mask, bool val) { @@ -211,11 +211,11 @@ void set(T v) { assert (v >= 0, "sanity"); assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); - OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v); + Atomic::release_store_fence(&value, (ShenandoahSharedValue)v); } T get() const { - return (T)OrderAccess::load_acquire(&value); + return (T)Atomic::load_acquire(&value); } T cmpxchg(T new_value, T expected) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -756,12 +756,12 @@ if (r->is_humongous()) { // For humongous objects, test if start region is marked live, and if so, // all humongous regions in that chain have live data equal to their "used". - juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]); + juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->region_number()]); if (start_live > 0) { verf_live = (juint)(r->used() / HeapWordSize); } } else { - verf_live = OrderAccess::load_acquire(&ld[r->region_number()]); + verf_live = Atomic::load_acquire(&ld[r->region_number()]); } size_t reg_live = r->get_live_data_words(); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/z/zLiveMap.cpp --- a/src/hotspot/share/gc/z/zLiveMap.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/z/zLiveMap.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -54,9 +54,9 @@ // Multiple threads can enter here, make sure only one of them // resets the marking information while the others busy wait. 
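ShenandoahSharedFlag, just above, pairs Atomic::release_store_fence on the writing side with Atomic::load_acquire on the reading side. A rough standard-C++ equivalent of that pairing; SharedFlag is an illustrative name and the trailing seq_cst fence stands in for HotSpot's full fence.

  #include <atomic>

  class SharedFlag {
    std::atomic<unsigned char> value{0};
  public:
    void set() {
      value.store(1, std::memory_order_release);                // release store...
      std::atomic_thread_fence(std::memory_order_seq_cst);      // ...then a full fence
    }
    void unset() {
      value.store(0, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
    bool is_set() const {
      return value.load(std::memory_order_acquire) != 0;        // acquire load
    }
  };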
- for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum); + for (uint32_t seqnum = Atomic::load_acquire(&_seqnum); seqnum != ZGlobalSeqNum; - seqnum = OrderAccess::load_acquire(&_seqnum)) { + seqnum = Atomic::load_acquire(&_seqnum)) { if ((seqnum != seqnum_initializing) && (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) { // Reset marking information @@ -73,7 +73,7 @@ // before the update of the page seqnum, such that when the // up-to-date seqnum is load acquired, the bit maps will not // contain stale information. - OrderAccess::release_store(&_seqnum, ZGlobalSeqNum); + Atomic::release_store(&_seqnum, ZGlobalSeqNum); break; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/z/zLiveMap.inline.hpp --- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -39,7 +39,7 @@ } inline bool ZLiveMap::is_marked() const { - return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum; + return Atomic::load_acquire(&_seqnum) == ZGlobalSeqNum; } inline uint32_t ZLiveMap::live_objects() const { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/z/zNMethodData.cpp --- a/src/hotspot/share/gc/z/zNMethodData.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/z/zNMethodData.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -78,7 +78,7 @@ } ZNMethodDataOops* ZNMethodData::oops() const { - return OrderAccess::load_acquire(&_oops); + return Atomic::load_acquire(&_oops); } ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/gc/z/zObjectAllocator.cpp --- a/src/hotspot/share/gc/z/zObjectAllocator.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -82,7 +82,7 @@ size_t size, ZAllocationFlags flags) { uintptr_t addr = 0; - ZPage* page = OrderAccess::load_acquire(shared_page); + ZPage* page = Atomic::load_acquire(shared_page); if (page != NULL) { addr = page->alloc_object_atomic(size); @@ -304,7 +304,7 @@ size_t ZObjectAllocator::remaining() const { assert(ZThread::is_java(), "Should be a Java thread"); - const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr()); + const ZPage* const page = Atomic::load_acquire(shared_small_page_addr()); if (page != NULL) { return page->remaining(); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/interpreter/oopMapCache.cpp --- a/src/hotspot/share/interpreter/oopMapCache.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/interpreter/oopMapCache.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -448,7 +448,7 @@ } OopMapCacheEntry* OopMapCache::entry_at(int i) const { - return OrderAccess::load_acquire(&(_array[i % _size])); + return Atomic::load_acquire(&(_array[i % _size])); } bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "memory/allocation.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/atomic.hpp" #define USED_BIT 1 #define METHOD_USED_BIT (USED_BIT << 2) @@ -91,16 +91,16 @@ } static bool has_changed_tag_state() { - if 
(OrderAccess::load_acquire(&_tag_state)) { - OrderAccess::release_store(&_tag_state, false); + if (Atomic::load_acquire(&_tag_state)) { + Atomic::release_store(&_tag_state, false); return true; } return false; } static void set_changed_tag_state() { - if (!OrderAccess::load_acquire(&_tag_state)) { - OrderAccess::release_store(&_tag_state, true); + if (!Atomic::load_acquire(&_tag_state)) { + Atomic::release_store(&_tag_state, true); } } }; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp --- a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -46,19 +46,19 @@ inline void set_generation(uint64_t value, uint64_t* const dest) { assert(dest != NULL, "invariant"); - OrderAccess::release_store(dest, value); + Atomic::release_store(dest, value); } static void increment_store_generation() { - const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation); - const uint64_t current_stored = OrderAccess::load_acquire(&store_generation); + const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation); + const uint64_t current_stored = Atomic::load_acquire(&store_generation); if (current_serialized == current_stored) { set_generation(current_serialized + 1, &store_generation); } } static bool increment_serialized_generation() { - const uint64_t current_stored = OrderAccess::load_acquire(&store_generation); - const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation); + const uint64_t current_stored = Atomic::load_acquire(&store_generation); + const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation); if (current_stored != current_serialized) { set_generation(current_stored, &serialized_generation); return true; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/jfr/utilities/jfrHashtable.hpp --- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -58,9 +58,9 @@ TableEntry* _entry; TableEntry* get_entry() const { - return (TableEntry*)OrderAccess::load_acquire(&_entry); + return (TableEntry*)Atomic::load_acquire(&_entry); } - void set_entry(TableEntry* entry) { OrderAccess::release_store(&_entry, entry);} + void set_entry(TableEntry* entry) { Atomic::release_store(&_entry, entry);} TableEntry** entry_addr() { return &_entry; } }; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/logging/logDecorations.cpp --- a/src/hotspot/share/logging/logDecorations.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/logging/logDecorations.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -44,7 +44,7 @@ } const char* LogDecorations::host_name() { - const char* host_name = OrderAccess::load_acquire(&_host_name); + const char* host_name = Atomic::load_acquire(&_host_name); if (host_name == NULL) { char buffer[1024]; if (os::get_host_name(buffer, sizeof(buffer))) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/memory/metaspace.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -128,7 +128,7 @@ } size_t MetaspaceGC::capacity_until_GC() { - size_t value = OrderAccess::load_acquire(&_capacity_until_GC); + size_t value = Atomic::load_acquire(&_capacity_until_GC); assert(value >= MetaspaceSize, "Not initialized 
properly?"); return value; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/accessBackend.inline.hpp --- a/src/hotspot/share/oops/accessBackend.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/accessBackend.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -134,7 +134,7 @@ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { OrderAccess::fence(); } - return OrderAccess::load_acquire(reinterpret_cast(addr)); + return Atomic::load_acquire(reinterpret_cast(addr)); } template @@ -142,7 +142,7 @@ inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::load_internal(void* addr) { - return OrderAccess::load_acquire(reinterpret_cast(addr)); + return Atomic::load_acquire(reinterpret_cast(addr)); } template @@ -158,7 +158,7 @@ inline typename EnableIf< HasDecorator::value>::type RawAccessBarrier::store_internal(void* addr, T value) { - OrderAccess::release_store_fence(reinterpret_cast(addr), value); + Atomic::release_store_fence(reinterpret_cast(addr), value); } template @@ -166,7 +166,7 @@ inline typename EnableIf< HasDecorator::value>::type RawAccessBarrier::store_internal(void* addr, T value) { - OrderAccess::release_store(reinterpret_cast(addr), value); + Atomic::release_store(reinterpret_cast(addr), value); } template diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/array.hpp --- a/src/hotspot/share/oops/array.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/array.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" #include "memory/metaspace.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/atomic.hpp" #include "utilities/align.hpp" // Array for metadata allocation @@ -122,8 +122,8 @@ T* adr_at(const int i) { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; } int find(const T& x) { return index_of(x); } - T at_acquire(const int i) { return OrderAccess::load_acquire(adr_at(i)); } - void release_at_put(int i, T x) { OrderAccess::release_store(adr_at(i), x); } + T at_acquire(const int i) { return Atomic::load_acquire(adr_at(i)); } + void release_at_put(int i, T x) { Atomic::release_store(adr_at(i), x); } static int size(int length) { size_t bytes = align_up(byte_sizeof(length), BytesPerWord); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/arrayKlass.inline.hpp --- a/src/hotspot/share/oops/arrayKlass.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/arrayKlass.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -29,11 +29,11 @@ #include "oops/arrayKlass.hpp" inline Klass* ArrayKlass::higher_dimension_acquire() const { - return OrderAccess::load_acquire(&_higher_dimension); + return Atomic::load_acquire(&_higher_dimension); } inline void ArrayKlass::release_set_higher_dimension(Klass* k) { - OrderAccess::release_store(&_higher_dimension, k); + Atomic::release_store(&_higher_dimension, k); } #endif // SHARE_OOPS_ARRAYKLASS_INLINE_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/constantPool.cpp --- a/src/hotspot/share/oops/constantPool.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/constantPool.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -232,7 +232,7 @@ symbol_at_put(name_index, name); name->increment_refcount(); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); - OrderAccess::release_store(adr, k); + Atomic::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* non-NULL, so we need hardware store ordering here. 
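The constant pool change here keeps the existing rule that the resolved Klass* is release-stored before the tag is written, so a reader that observes the resolved tag also observes a non-null Klass*, which is what the interpreter assumes. A minimal sketch of that ordering; Entry, resolve and get_if_resolved are illustrative names, not the real ConstantPool layout.

  #include <atomic>

  struct Entry {
    std::atomic<void*> klass{nullptr};
    std::atomic<int>   tag{0};                     // 0 = unresolved, 1 = resolved
  };

  // Writer: release-store the pointer first (Atomic::release_store(adr, k)),
  // then publish the tag; the tag store is ordered after the pointer store.
  void resolve(Entry& e, void* k) {
    e.klass.store(k, std::memory_order_release);
    e.tag.store(1, std::memory_order_release);
  }

  // Reader: an acquire load of the tag that sees "resolved" also sees the
  // pointer stored before it, so the relaxed load below cannot return null
  // when resolve() was called with a non-null k.
  void* get_if_resolved(Entry& e) {
    if (e.tag.load(std::memory_order_acquire) == 1) {
      return e.klass.load(std::memory_order_relaxed);
    }
    return nullptr;
  }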
@@ -249,7 +249,7 @@ CPKlassSlot kslot = klass_slot_at(class_index); int resolved_klass_index = kslot.resolved_klass_index(); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); - OrderAccess::release_store(adr, k); + Atomic::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* non-NULL, so we need hardware store ordering here. @@ -525,7 +525,7 @@ trace_class_resolution(this_cp, k); } Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index); - OrderAccess::release_store(adr, k); + Atomic::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* stored in _resolved_klasses is non-NULL, so we need // hardware store ordering here. diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/constantPool.inline.hpp --- a/src/hotspot/share/oops/constantPool.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/constantPool.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -33,7 +33,7 @@ assert(is_within_bounds(which), "index out of bounds"); assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool"); // Uses volatile because the klass slot changes without a lock. - intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which)); + intptr_t adr = Atomic::load_acquire(obj_at_addr(which)); assert(adr != 0 || which == 0, "cp entry for klass should not be zero"); return CPSlot(adr); } @@ -46,7 +46,7 @@ assert(tag_at(kslot.name_index()).is_symbol(), "sanity"); Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index()); - return OrderAccess::load_acquire(adr); + return Atomic::load_acquire(adr); } inline bool ConstantPool::is_pseudo_string_at(int which) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/cpCache.cpp --- a/src/hotspot/share/oops/cpCache.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/cpCache.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -97,7 +97,7 @@ assert(c == 0 || c == code || code == 0, "update must be consistent"); #endif // Need to flush pending stores here before bytecode is written. - OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift)); + Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift)); } void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { @@ -107,17 +107,17 @@ assert(c == 0 || c == code || code == 0, "update must be consistent"); #endif // Need to flush pending stores here before bytecode is written. - OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift)); + Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift)); } // Sets f1, ordering with previous writes. 
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { assert(f1 != NULL, ""); - OrderAccess::release_store(&_f1, f1); + Atomic::release_store(&_f1, f1); } void ConstantPoolCacheEntry::set_indy_resolution_failed() { - OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift)); + Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift)); } // Note that concurrent update of both bytecodes can leave one of them diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/cpCache.inline.hpp --- a/src/hotspot/share/oops/cpCache.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/cpCache.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -29,7 +29,7 @@ #include "oops/oopHandle.inline.hpp" #include "runtime/orderAccess.hpp" -inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); } +inline int ConstantPoolCacheEntry::indices_ord() const { return Atomic::load_acquire(&_indices); } inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); @@ -53,7 +53,7 @@ return (Method*)_f2; } -inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); } +inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); } inline Method* ConstantPoolCacheEntry::f1_as_method() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); @@ -75,7 +75,7 @@ return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0; } -inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); } +inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)Atomic::load_acquire(&_flags); } inline bool ConstantPoolCacheEntry::indy_resolution_failed() const { intx flags = flags_ord(); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/instanceKlass.cpp --- a/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -1097,7 +1097,7 @@ return NULL; } else { // This load races with inserts, and therefore needs acquire. - Klass* kls = OrderAccess::load_acquire(k); + Klass* kls = Atomic::load_acquire(k); if (kls != NULL && !kls->is_loader_alive()) { return NULL; // don't return unloaded class } else { @@ -1113,7 +1113,7 @@ Klass* volatile* addr = adr_implementor(); assert(addr != NULL, "null addr"); if (addr != NULL) { - OrderAccess::release_store(addr, k); + Atomic::release_store(addr, k); } } @@ -1370,14 +1370,14 @@ InterpreterOopMap* entry_for) { // Lazily create the _oop_map_cache at first request // Lock-free access requires load_acquire. - OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache); + OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache); if (oop_map_cache == NULL) { MutexLocker x(OopMapCacheAlloc_lock); // Check if _oop_map_cache was allocated while we were waiting for this lock if ((oop_map_cache = _oop_map_cache) == NULL) { oop_map_cache = new OopMapCache(); // Ensure _oop_map_cache is stable, since it is examined without a lock - OrderAccess::release_store(&_oop_map_cache, oop_map_cache); + Atomic::release_store(&_oop_map_cache, oop_map_cache); } } // _oop_map_cache is constant after init; lookup below does its own locking. 
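
// Sketch of the double-checked initialization idiom used for _oop_map_cache
// above (field and function names are illustrative): a lock-free acquire load
// on the fast path, and a release store inside the lock so the cache is fully
// constructed before its address becomes visible to other threads.
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

static OopMapCache* volatile _example_cache = NULL;   // hypothetical field

static OopMapCache* get_or_create_cache() {
  OopMapCache* cache = Atomic::load_acquire(&_example_cache);
  if (cache == NULL) {
    MutexLocker ml(OopMapCacheAlloc_lock);
    cache = _example_cache;              // re-check under the lock
    if (cache == NULL) {
      cache = new OopMapCache();
      Atomic::release_store(&_example_cache, cache);
    }
  }
  return cache;
}
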
@@ -2114,7 +2114,7 @@ // The jmethodID cache can be read while unlocked so we have to // make sure the new jmethodID is complete before installing it // in the cache. - OrderAccess::release_store(&jmeths[idnum+1], id); + Atomic::release_store(&jmeths[idnum+1], id); } else { *to_dealloc_id_p = new_id; // save new id for later delete } @@ -2196,7 +2196,7 @@ assert (ClassUnloading, "only called for ClassUnloading"); for (;;) { // Use load_acquire due to competing with inserts - Klass* impl = OrderAccess::load_acquire(adr_implementor()); + Klass* impl = Atomic::load_acquire(adr_implementor()); if (impl != NULL && !impl->is_loader_alive()) { // NULL this field, might be an unloaded klass or NULL Klass* volatile* klass = adr_implementor(); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/instanceKlass.inline.hpp --- a/src/hotspot/share/oops/instanceKlass.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/instanceKlass.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -35,19 +35,19 @@ #include "utilities/macros.hpp" inline Klass* InstanceKlass::array_klasses_acquire() const { - return OrderAccess::load_acquire(&_array_klasses); + return Atomic::load_acquire(&_array_klasses); } inline void InstanceKlass::release_set_array_klasses(Klass* k) { - OrderAccess::release_store(&_array_klasses, k); + Atomic::release_store(&_array_klasses, k); } inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const { - return OrderAccess::load_acquire(&_methods_jmethod_ids); + return Atomic::load_acquire(&_methods_jmethod_ids); } inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) { - OrderAccess::release_store(&_methods_jmethod_ids, jmeths); + Atomic::release_store(&_methods_jmethod_ids, jmeths); } // The iteration over the oops in objects is a hot path in the GC code. diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/klass.cpp --- a/src/hotspot/share/oops/klass.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/klass.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -362,7 +362,7 @@ Klass* Klass::subklass(bool log) const { // Need load_acquire on the _subklass, because it races with inserts that // publishes freshly initialized data. - for (Klass* chain = OrderAccess::load_acquire(&_subklass); + for (Klass* chain = Atomic::load_acquire(&_subklass); chain != NULL; // Do not need load_acquire on _next_sibling, because inserts never // create _next_sibling edges to dead data. 
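
// Sketch of the lock-free list walk shown in Klass::subklass() above
// (simplified, with a hypothetical Node type): only the head pointer races
// with concurrent inserts and therefore needs an acquire load; the _next
// links are written before a node is published, so plain loads suffice once
// a node has been observed.
#include "runtime/atomic.hpp"

struct ExampleNode {          // hypothetical
  ExampleNode* _next;
  bool         _alive;
};

static ExampleNode* volatile _example_head = NULL;   // hypothetical list head

static ExampleNode* first_alive() {
  for (ExampleNode* n = Atomic::load_acquire(&_example_head);
       n != NULL;
       n = n->_next) {
    if (n->_alive) {
      return n;
    }
  }
  return NULL;
}
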
@@ -402,7 +402,7 @@ void Klass::set_subklass(Klass* s) { assert(s != this, "sanity check"); - OrderAccess::release_store(&_subklass, s); + Atomic::release_store(&_subklass, s); } void Klass::set_next_sibling(Klass* s) { @@ -427,7 +427,7 @@ super->clean_subklass(); for (;;) { - Klass* prev_first_subklass = OrderAccess::load_acquire(&_super->_subklass); + Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass); if (prev_first_subklass != NULL) { // set our sibling to be the superklass' previous first subklass assert(prev_first_subklass->is_loader_alive(), "May not attach not alive klasses"); @@ -446,7 +446,7 @@ void Klass::clean_subklass() { for (;;) { // Need load_acquire, due to contending with concurrent inserts - Klass* subklass = OrderAccess::load_acquire(&_subklass); + Klass* subklass = Atomic::load_acquire(&_subklass); if (subklass == NULL || subklass->is_loader_alive()) { return; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/method.cpp --- a/src/hotspot/share/oops/method.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/method.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -1247,7 +1247,7 @@ } address Method::from_compiled_entry_no_trampoline() const { - CompiledMethod *code = OrderAccess::load_acquire(&_code); + CompiledMethod *code = Atomic::load_acquire(&_code); if (code) { return code->verified_entry_point(); } else { @@ -1273,7 +1273,7 @@ // Not inline to avoid circular ref. bool Method::check_code() const { // cached in a register or local. There's a race on the value of the field. - CompiledMethod *code = OrderAccess::load_acquire(&_code); + CompiledMethod *code = Atomic::load_acquire(&_code); return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method()); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/method.inline.hpp --- a/src/hotspot/share/oops/method.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/method.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -29,23 +29,23 @@ #include "runtime/orderAccess.hpp" inline address Method::from_compiled_entry() const { - return OrderAccess::load_acquire(&_from_compiled_entry); + return Atomic::load_acquire(&_from_compiled_entry); } inline address Method::from_interpreted_entry() const { - return OrderAccess::load_acquire(&_from_interpreted_entry); + return Atomic::load_acquire(&_from_interpreted_entry); } inline void Method::set_method_data(MethodData* data) { // The store into method must be released. On platforms without // total store order (TSO) the reference may become visible before // the initialization of data otherwise. - OrderAccess::release_store(&_method_data, data); + Atomic::release_store(&_method_data, data); } inline CompiledMethod* volatile Method::code() const { assert( check_code(), "" ); - return OrderAccess::load_acquire(&_code); + return Atomic::load_acquire(&_code); } // Write (bci, line number) pair to stream diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/methodData.cpp --- a/src/hotspot/share/oops/methodData.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/methodData.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -1415,7 +1415,7 @@ for (;; dp = next_extra(dp)) { assert(dp < end, "moved past end of extra data"); - // No need for "OrderAccess::load_acquire" ops, + // No need for "Atomic::load_acquire" ops, // since the data structure is monotonic. 
switch(dp->tag()) { case DataLayout::no_tag: @@ -1550,7 +1550,7 @@ DataLayout* end = args_data_limit(); for (;; dp = next_extra(dp)) { assert(dp < end, "moved past end of extra data"); - // No need for "OrderAccess::load_acquire" ops, + // No need for "Atomic::load_acquire" ops, // since the data structure is monotonic. switch(dp->tag()) { case DataLayout::no_tag: diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/methodData.inline.hpp --- a/src/hotspot/share/oops/methodData.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/methodData.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -29,7 +29,7 @@ #include "runtime/orderAccess.hpp" inline void DataLayout::release_set_cell_at(int index, intptr_t value) { - OrderAccess::release_store(&_cells[index], value); + Atomic::release_store(&_cells[index], value); } inline void ProfileData::release_set_intptr_at(int index, intptr_t value) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/oops/oop.inline.hpp --- a/src/hotspot/share/oops/oop.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/oops/oop.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -110,9 +110,9 @@ // Workaround for non-const load_acquire parameter. const volatile narrowKlass* addr = &_metadata._compressed_klass; volatile narrowKlass* xaddr = const_cast(addr); - return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr)); + return CompressedKlassPointers::decode(Atomic::load_acquire(xaddr)); } else { - return OrderAccess::load_acquire(&_metadata._klass); + return Atomic::load_acquire(&_metadata._klass); } } @@ -156,10 +156,10 @@ void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) { CHECK_SET_KLASS(klass); if (UseCompressedClassPointers) { - OrderAccess::release_store(compressed_klass_addr(mem), - CompressedKlassPointers::encode_not_null(klass)); + Atomic::release_store(compressed_klass_addr(mem), + CompressedKlassPointers::encode_not_null(klass)); } else { - OrderAccess::release_store(klass_addr(mem), klass); + Atomic::release_store(klass_addr(mem), klass); } } @@ -356,7 +356,7 @@ // The forwardee is used when copying during scavenge and mark-sweep. // It does need to clear the low two locking- and GC-related bits. oop oopDesc::forwardee_acquire() const { - return (oop) OrderAccess::load_acquire(&_mark).decode_pointer(); + return (oop) Atomic::load_acquire(&_mark).decode_pointer(); } // The following method needs to be MT safe. diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/prims/jni.cpp --- a/src/hotspot/share/prims/jni.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/prims/jni.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -274,7 +274,7 @@ uintx count = 0; while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) { - while (OrderAccess::load_acquire(&JNIHistogram_lock) != 0) { + while (Atomic::load_acquire(&JNIHistogram_lock) != 0) { count +=1; if ( (WarnOnStalledSpinLock > 0) && (count % WarnOnStalledSpinLock == 0)) { @@ -3916,7 +3916,7 @@ *(JNIEnv**)penv = 0; // reset vm_created last to avoid race condition. Use OrderAccess to // control both compiler and architectural-based reordering. - OrderAccess::release_store(&vm_created, 0); + Atomic::release_store(&vm_created, 0); } // Flush stdout and stderr before exit. 
diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/prims/jvm.cpp --- a/src/hotspot/share/prims/jvm.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/prims/jvm.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -234,7 +234,7 @@ uintx count = 0; while (Atomic::cmpxchg(1, &JVMHistogram_lock, 0) != 0) { - while (OrderAccess::load_acquire(&JVMHistogram_lock) != 0) { + while (Atomic::load_acquire(&JVMHistogram_lock) != 0) { count +=1; if ( (WarnOnStalledSpinLock > 0) && (count % WarnOnStalledSpinLock == 0)) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/prims/jvmtiEnvBase.hpp --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -255,11 +255,11 @@ } JvmtiTagMap* tag_map_acquire() { - return OrderAccess::load_acquire(&_tag_map); + return Atomic::load_acquire(&_tag_map); } void release_set_tag_map(JvmtiTagMap* tag_map) { - OrderAccess::release_store(&_tag_map, tag_map); + Atomic::release_store(&_tag_map, tag_map); } // return true if event is enabled globally or for any thread diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/prims/jvmtiRawMonitor.cpp --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -147,7 +147,7 @@ void JvmtiRawMonitor::simple_exit(Thread* self) { guarantee(_owner == self, "invariant"); - OrderAccess::release_store(&_owner, (Thread*)NULL); + Atomic::release_store(&_owner, (Thread*)NULL); OrderAccess::fence(); if (_entry_list == NULL) { return; diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/atomic.hpp --- a/src/hotspot/share/runtime/atomic.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/atomic.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -34,6 +34,7 @@ #include "metaprogramming/primitiveConversions.hpp" #include "metaprogramming/removeCV.hpp" #include "metaprogramming/removePointer.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" @@ -48,6 +49,12 @@ memory_order_conservative = 8 }; +enum ScopedFenceType { + X_ACQUIRE + , RELEASE_X + , RELEASE_X_FENCE +}; + class Atomic : AllStatic { public: // Atomic operations on int64 types are not available on all 32-bit @@ -75,12 +82,21 @@ template inline static void store(T store_value, volatile D* dest); + template + inline static void release_store(volatile D* dest, T store_value); + + template + inline static void release_store_fence(volatile D* dest, T store_value); + // Atomically load from a location // The type T must be either a pointer type, an integral/enum type, // or a type that is primitive convertible using PrimitiveConversions. template inline static T load(const volatile T* dest); + template + inline static T load_acquire(const volatile T* dest); + // Atomically add to a location. Returns updated value. add*() provide: // add-value-to-dest @@ -200,6 +216,10 @@ // requires more for e.g. 64 bit loads, a specialization is required template struct PlatformLoad; + // Give platforms a variation point to specialize. + template struct PlatformOrderedStore; + template struct PlatformOrderedLoad; + private: // Dispatch handler for add. 
Provides type-based validity checking // and limited conversions around calls to the platform-specific @@ -578,6 +598,32 @@ atomic_memory_order order) const; }; +template +class ScopedFenceGeneral: public StackObj { + public: + void prefix() {} + void postfix() {} +}; + +// The following methods can be specialized using simple template specialization +// in the platform specific files for optimization purposes. Otherwise the +// generalized variant is used. + +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::acquire(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::fence(); } + +template +class ScopedFence : public ScopedFenceGeneral { + void *const _field; + public: + ScopedFence(void *const field) : _field(field) { prefix(); } + ~ScopedFence() { postfix(); } + void prefix() { ScopedFenceGeneral::prefix(); } + void postfix() { ScopedFenceGeneral::postfix(); } +}; + // platform specific in-line definitions - must come before shared definitions #include OS_CPU_HEADER(atomic) @@ -594,11 +640,44 @@ return LoadImpl >()(dest); } +template +struct Atomic::PlatformOrderedLoad { + template + T operator()(const volatile T* p) const { + ScopedFence f((void*)p); + return Atomic::load(p); + } +}; + +template +inline T Atomic::load_acquire(const volatile T* p) { + return LoadImpl >()(p); +} + template inline void Atomic::store(T store_value, volatile D* dest) { StoreImpl >()(store_value, dest); } +template +struct Atomic::PlatformOrderedStore { + template + void operator()(T v, volatile T* p) const { + ScopedFence f((void*)p); + Atomic::store(v, p); + } +}; + +template +inline void Atomic::release_store(volatile D* p, T v) { + StoreImpl >()(v, p); +} + +template +inline void Atomic::release_store_fence(volatile D* p, T v) { + StoreImpl >()(v, p); +} + template inline D Atomic::add(I add_value, D volatile* dest, atomic_memory_order order) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/handshake.cpp --- a/src/hotspot/share/runtime/handshake.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/handshake.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -294,7 +294,7 @@ if (!_semaphore.trywait()) { _semaphore.wait_with_safepoint_check(thread); } - HandshakeOperation* op = OrderAccess::load_acquire(&_operation); + HandshakeOperation* op = Atomic::load_acquire(&_operation); if (op != NULL) { HandleMark hm(thread); CautiouslyPreserveExceptionMark pem(thread); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/init.cpp --- a/src/hotspot/share/runtime/init.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/init.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -195,7 +195,7 @@ static volatile bool _init_completed = false; bool is_init_completed() { - return OrderAccess::load_acquire(&_init_completed); + return Atomic::load_acquire(&_init_completed); } void wait_init_completed() { @@ -208,6 +208,6 @@ void set_init_completed() { assert(Universe::is_fully_initialized(), "Should have completed initialization"); MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag); - OrderAccess::release_store(&_init_completed, true); + Atomic::release_store(&_init_completed, true); ml.notify_all(); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/interfaceSupport.cpp --- a/src/hotspot/share/runtime/interfaceSupport.cpp Mon Nov 25 14:06:13 2019 +0100 +++ 
b/src/hotspot/share/runtime/interfaceSupport.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -88,7 +88,7 @@ uintx count = 0; while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) { - while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) { + while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) { count +=1; if ( (WarnOnStalledSpinLock > 0) && (count % WarnOnStalledSpinLock == 0)) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/objectMonitor.cpp --- a/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -916,8 +916,8 @@ // release semantics: prior loads and stores from within the critical section // must not float (reorder) past the following store that drops the lock. - OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock - OrderAccess::storeload(); // See if we need to wake a successor + Atomic::release_store(&_owner, (void*)NULL); // drop the lock + OrderAccess::storeload(); // See if we need to wake a successor if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { return; } @@ -1092,7 +1092,7 @@ Wakee = NULL; // Drop the lock - OrderAccess::release_store(&_owner, (void*)NULL); + Atomic::release_store(&_owner, (void*)NULL); OrderAccess::fence(); // ST _owner vs LD in unpark() DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/orderAccess.hpp --- a/src/hotspot/share/runtime/orderAccess.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/orderAccess.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -26,7 +26,6 @@ #define SHARE_RUNTIME_ORDERACCESS_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" #include "utilities/macros.hpp" // Memory Access Ordering Model @@ -231,30 +230,7 @@ // order. If their implementations change such that these assumptions // are violated, a whole lot of code will break. -enum ScopedFenceType { - X_ACQUIRE - , RELEASE_X - , RELEASE_X_FENCE -}; - -template -class ScopedFenceGeneral: public StackObj { - public: - void prefix() {} - void postfix() {} -}; - -template -class ScopedFence : public ScopedFenceGeneral { - void *const _field; - public: - ScopedFence(void *const field) : _field(field) { prefix(); } - ~ScopedFence() { postfix(); } - void prefix() { ScopedFenceGeneral::prefix(); } - void postfix() { ScopedFenceGeneral::postfix(); } -}; - -class OrderAccess : private Atomic { +class OrderAccess : public AllStatic { public: // barriers static void loadload(); @@ -267,85 +243,13 @@ static void fence(); static void cross_modify_fence(); - - template - static T load_acquire(const volatile T* p); - - template - static void release_store(volatile D* p, T v); - - template - static void release_store_fence(volatile D* p, T v); - - private: +private: // This is a helper that invokes the StubRoutines::fence_entry() // routine if it exists, It should only be used by platforms that // don't have another way to do the inline assembly. static void StubRoutines_fence(); - - // Give platforms a variation point to specialize. - template struct PlatformOrderedStore; - template struct PlatformOrderedLoad; - - template - static void ordered_store(volatile FieldType* p, FieldType v); - - template - static FieldType ordered_load(const volatile FieldType* p); -}; - -// The following methods can be specialized using simple template specialization -// in the platform specific files for optimization purposes. 
Otherwise the -// generalized variant is used. - -template -struct OrderAccess::PlatformOrderedStore { - template - void operator()(T v, volatile T* p) const { - ordered_store(p, v); - } -}; - -template -struct OrderAccess::PlatformOrderedLoad { - template - T operator()(const volatile T* p) const { - return ordered_load(p); - } }; #include OS_CPU_HEADER(orderAccess) -template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::acquire(); } -template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } -template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } -template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::fence(); } - - -template -inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) { - ScopedFence f((void*)p); - Atomic::store(v, p); -} - -template -inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) { - ScopedFence f((void*)p); - return Atomic::load(p); -} - -template -inline T OrderAccess::load_acquire(const volatile T* p) { - return LoadImpl >()(p); -} - -template -inline void OrderAccess::release_store(volatile D* p, T v) { - StoreImpl >()(v, p); -} - -template -inline void OrderAccess::release_store_fence(volatile D* p, T v) { - StoreImpl >()(v, p); -} #endif // SHARE_RUNTIME_ORDERACCESS_HPP diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/perfMemory.cpp --- a/src/hotspot/share/runtime/perfMemory.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/perfMemory.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -156,7 +156,7 @@ _prologue->overflow = 0; _prologue->mod_time_stamp = 0; - OrderAccess::release_store(&_initialized, 1); + Atomic::release_store(&_initialized, 1); } void PerfMemory::destroy() { @@ -269,5 +269,5 @@ } bool PerfMemory::is_initialized() { - return OrderAccess::load_acquire(&_initialized) != 0; + return Atomic::load_acquire(&_initialized) != 0; } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/safepoint.cpp --- a/src/hotspot/share/runtime/safepoint.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/safepoint.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -328,7 +328,7 @@ assert((_safepoint_counter & 0x1) == 0, "must be even"); // The store to _safepoint_counter must happen after any stores in arming. - OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); + Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1); // We are synchronizing OrderAccess::storestore(); // Ordered with _safepoint_counter @@ -482,7 +482,7 @@ // Set the next dormant (even) safepoint id. assert((_safepoint_counter & 0x1) == 1, "must be odd"); - OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); + Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1); OrderAccess::fence(); // Keep the local state from floating up. 
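
// Sketch of what the generic (non-specialized) paths added to atomic.hpp
// expand to on a platform without PlatformOrderedLoad/Store overrides:
//
//   Atomic::load_acquire(p)           ~ T v = Atomic::load(p); OrderAccess::acquire(); return v;
//   Atomic::release_store(p, v)       ~ OrderAccess::release(); Atomic::store(v, p);
//   Atomic::release_store_fence(p, v) ~ OrderAccess::release(); Atomic::store(v, p); OrderAccess::fence();
//
// ScopedFence<X_ACQUIRE / RELEASE_X / RELEASE_X_FENCE> issues the prefix
// barrier in its constructor and the postfix barrier in its destructor around
// the plain load or store. A minimal usage sketch (flag name is hypothetical):
#include "runtime/atomic.hpp"

static volatile int _example_flag = 0;   // hypothetical flag

static void example_usage() {
  Atomic::release_store(&_example_flag, 1);        // publish
  int v = Atomic::load_acquire(&_example_flag);    // observe with acquire
  Atomic::release_store_fence(&_example_flag, 0);  // publish, then full fence
  (void)v;
}
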
@@ -968,15 +968,15 @@ } uint64_t ThreadSafepointState::get_safepoint_id() const { - return OrderAccess::load_acquire(&_safepoint_id); + return Atomic::load_acquire(&_safepoint_id); } void ThreadSafepointState::reset_safepoint_id() { - OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter); + Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter); } void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) { - OrderAccess::release_store(&_safepoint_id, safepoint_id); + Atomic::release_store(&_safepoint_id, safepoint_id); } void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/synchronizer.cpp --- a/src/hotspot/share/runtime/synchronizer.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/synchronizer.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -889,7 +889,7 @@ // Visitors ... void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { - PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list); + PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); for (int i = _BLOCKSIZE - 1; i > 0; i--) { @@ -1118,7 +1118,7 @@ temp[0]._next_om = g_block_list; // There are lock-free uses of g_block_list so make sure that // the previous stores happen before we update g_block_list. - OrderAccess::release_store(&g_block_list, temp); + Atomic::release_store(&g_block_list, temp); // Add the new string of ObjectMonitors to the global free list temp[_BLOCKSIZE - 1]._next_om = g_free_list; @@ -2169,7 +2169,7 @@ // the list of extant blocks without taking a lock. int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { - PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list); + PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/thread.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -1269,7 +1269,7 @@ NonJavaThread::Iterator::Iterator() : _protect_enter(_the_list._protect.enter()), - _current(OrderAccess::load_acquire(&_the_list._head)) + _current(Atomic::load_acquire(&_the_list._head)) {} NonJavaThread::Iterator::~Iterator() { @@ -1278,7 +1278,7 @@ void NonJavaThread::Iterator::step() { assert(!end(), "precondition"); - _current = OrderAccess::load_acquire(&_current->_next); + _current = Atomic::load_acquire(&_current->_next); } NonJavaThread::NonJavaThread() : Thread(), _next(NULL) { @@ -1291,8 +1291,8 @@ MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag); // Initialize BarrierSet-related data before adding to list. 
BarrierSet::barrier_set()->on_thread_attach(this); - OrderAccess::release_store(&_next, _the_list._head); - OrderAccess::release_store(&_the_list._head, this); + Atomic::release_store(&_next, _the_list._head); + Atomic::release_store(&_the_list._head, this); } void NonJavaThread::remove_from_the_list() { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/thread.inline.hpp --- a/src/hotspot/share/runtime/thread.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/thread.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -67,7 +67,7 @@ } inline jlong Thread::cooked_allocated_bytes() { - jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes); + jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes); if (UseTLAB) { size_t used_bytes = tlab().used_bytes(); if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) { @@ -87,11 +87,11 @@ } inline ThreadsList* Thread::get_threads_hazard_ptr() { - return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr); + return (ThreadsList*)Atomic::load_acquire(&_threads_hazard_ptr); } inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) { - OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list); + Atomic::release_store_fence(&_threads_hazard_ptr, new_list); } inline void JavaThread::set_ext_suspended() { @@ -118,7 +118,7 @@ #if defined(PPC64) || defined (AARCH64) // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. - return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state); + return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state); #else return _thread_state; #endif @@ -128,7 +128,7 @@ #if defined(PPC64) || defined (AARCH64) // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. - OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s); + Atomic::release_store((volatile jint*)&_thread_state, (jint)s); #else _thread_state = s; #endif @@ -200,7 +200,7 @@ // The release make sure this store is done after storing the handshake // operation or global state inline void JavaThread::set_polling_page_release(void* poll_value) { - OrderAccess::release_store(polling_page_addr(), poll_value); + Atomic::release_store(polling_page_addr(), poll_value); } // Caller is responsible for using a memory barrier if needed. @@ -211,14 +211,14 @@ // The aqcquire make sure reading of polling page is done before // the reading the handshake operation or the global state inline volatile void* JavaThread::get_polling_page() { - return OrderAccess::load_acquire(polling_page_addr()); + return Atomic::load_acquire(polling_page_addr()); } inline bool JavaThread::is_exiting() const { // Use load-acquire so that setting of _terminated by // JavaThread::exit() is seen more quickly. TerminatedTypes l_terminated = (TerminatedTypes) - OrderAccess::load_acquire((volatile jint *) &_terminated); + Atomic::load_acquire((volatile jint *) &_terminated); return l_terminated == _thread_exiting || check_is_terminated(l_terminated); } @@ -226,19 +226,19 @@ // Use load-acquire so that setting of _terminated by // JavaThread::exit() is seen more quickly. 
TerminatedTypes l_terminated = (TerminatedTypes) - OrderAccess::load_acquire((volatile jint *) &_terminated); + Atomic::load_acquire((volatile jint *) &_terminated); return check_is_terminated(l_terminated); } inline void JavaThread::set_terminated(TerminatedTypes t) { // use release-store so the setting of _terminated is seen more quickly - OrderAccess::release_store((volatile jint *) &_terminated, (jint) t); + Atomic::release_store((volatile jint *) &_terminated, (jint) t); } // special for Threads::remove() which is static: inline void JavaThread::set_terminated_value() { // use release-store so the setting of _terminated is seen more quickly - OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated); + Atomic::release_store((volatile jint *) &_terminated, (jint) _thread_terminated); } // Allow tracking of class initialization monitor use diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/threadHeapSampler.cpp --- a/src/hotspot/share/runtime/threadHeapSampler.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/threadHeapSampler.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -139,9 +139,9 @@ } int ThreadHeapSampler::get_sampling_interval() { - return OrderAccess::load_acquire(&_sampling_interval); + return Atomic::load_acquire(&_sampling_interval); } void ThreadHeapSampler::set_sampling_interval(int sampling_interval) { - OrderAccess::release_store(&_sampling_interval, sampling_interval); + Atomic::release_store(&_sampling_interval, sampling_interval); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/threadSMR.cpp --- a/src/hotspot/share/runtime/threadSMR.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/threadSMR.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -779,7 +779,7 @@ bool ThreadsSMRSupport::delete_notify() { // Use load_acquire() in order to see any updates to _delete_notify // earlier than when delete_lock is grabbed. - return (OrderAccess::load_acquire(&_delete_notify) != 0); + return (Atomic::load_acquire(&_delete_notify) != 0); } // Safely free a ThreadsList after a Threads::add() or Threads::remove(). 
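
// Sketch contrasting the two release flavors seen in the thread code above
// (field names hypothetical): release_store only keeps prior accesses from
// floating below the store, while release_store_fence adds a trailing full
// fence so the store is ordered before any later load -- which is what the
// hazard-pointer style publication of _threads_hazard_ptr needs.
#include "runtime/atomic.hpp"

static ThreadsList* volatile _example_hazard_ptr = NULL;  // hypothetical
static volatile int          _example_interval   = 0;     // hypothetical

static void publish_examples(ThreadsList* list, int interval) {
  // Plain publication: the writer does not re-read shared state afterwards.
  Atomic::release_store(&_example_interval, interval);

  // Publication that must be visible before the writer's subsequent loads.
  Atomic::release_store_fence(&_example_hazard_ptr, list);
  // ... safe to inspect shared thread-list state here ...
}
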
diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/threadSMR.inline.hpp --- a/src/hotspot/share/runtime/threadSMR.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/threadSMR.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -78,7 +78,7 @@ } inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() { - return (ThreadsList*)OrderAccess::load_acquire(&_java_thread_list); + return (ThreadsList*)Atomic::load_acquire(&_java_thread_list); } inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/vmThread.cpp --- a/src/hotspot/share/runtime/vmThread.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/runtime/vmThread.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -203,16 +203,16 @@ } bool VMOperationTimeoutTask::is_armed() { - return OrderAccess::load_acquire(&_armed) != 0; + return Atomic::load_acquire(&_armed) != 0; } void VMOperationTimeoutTask::arm() { _arm_time = os::javaTimeMillis(); - OrderAccess::release_store_fence(&_armed, 1); + Atomic::release_store_fence(&_armed, 1); } void VMOperationTimeoutTask::disarm() { - OrderAccess::release_store_fence(&_armed, 0); + Atomic::release_store_fence(&_armed, 0); } //------------------------------------------------------------------------------------------------------------------ diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/services/memoryManager.cpp --- a/src/hotspot/share/services/memoryManager.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/services/memoryManager.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -65,7 +65,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { // Must do an acquire so as to force ordering of subsequent // loads from anything _memory_mgr_obj points to or implies. - instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj); + instanceOop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj); if (mgr_obj == NULL) { // It's ok for more than one thread to execute the code up to the locked region. // Extra manager instances will just be gc'ed. @@ -118,7 +118,7 @@ // // The lock has done an acquire, so the load can't float above it, but // we need to do a load_acquire as above. - mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj); + mgr_obj = Atomic::load_acquire(&_memory_mgr_obj); if (mgr_obj != NULL) { return mgr_obj; } @@ -130,7 +130,7 @@ // with creating the management object are visible before publishing // its address. The unlock will publish the store to _memory_mgr_obj // because it does a release first. - OrderAccess::release_store(&_memory_mgr_obj, mgr_obj); + Atomic::release_store(&_memory_mgr_obj, mgr_obj); } } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/services/memoryPool.cpp --- a/src/hotspot/share/services/memoryPool.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/services/memoryPool.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -77,7 +77,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { // Must do an acquire so as to force ordering of subsequent // loads from anything _memory_pool_obj points to or implies. - instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj); + instanceOop pool_obj = Atomic::load_acquire(&_memory_pool_obj); if (pool_obj == NULL) { // It's ok for more than one thread to execute the code up to the locked region. // Extra pool instances will just be gc'ed. @@ -118,7 +118,7 @@ // // The lock has done an acquire, so the load can't float above it, // but we need to do a load_acquire as above. 
- pool_obj = OrderAccess::load_acquire(&_memory_pool_obj); + pool_obj = Atomic::load_acquire(&_memory_pool_obj); if (pool_obj != NULL) { return pool_obj; } @@ -130,7 +130,7 @@ // with creating the pool are visible before publishing its address. // The unlock will publish the store to _memory_pool_obj because // it does a release first. - OrderAccess::release_store(&_memory_pool_obj, pool_obj); + Atomic::release_store(&_memory_pool_obj, pool_obj); } } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/bitMap.inline.hpp --- a/src/hotspot/share/utilities/bitMap.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/bitMap.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -48,7 +48,7 @@ memory_order == memory_order_acquire || memory_order == memory_order_conservative, "unexpected memory ordering"); - return OrderAccess::load_acquire(addr); + return Atomic::load_acquire(addr); } } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/concurrentHashTable.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -58,7 +58,7 @@ ConcurrentHashTable:: Node::next() const { - return OrderAccess::load_acquire(&_next); + return Atomic::load_acquire(&_next); } // Bucket @@ -67,7 +67,7 @@ ConcurrentHashTable:: Bucket::first_raw() const { - return OrderAccess::load_acquire(&_first); + return Atomic::load_acquire(&_first); } template @@ -79,7 +79,7 @@ // Due to this assert this methods is not static. assert(is_locked(), "Must be locked."); Node** tmp = (Node**)dst; - OrderAccess::release_store(tmp, clear_set_state(node, *dst)); + Atomic::release_store(tmp, clear_set_state(node, *dst)); } template @@ -88,7 +88,7 @@ Bucket::first() const { // We strip the states bit before returning the ptr. - return clear_state(OrderAccess::load_acquire(&_first)); + return clear_state(Atomic::load_acquire(&_first)); } template @@ -173,7 +173,7 @@ assert(is_locked(), "Must be locked."); assert(!have_redirect(), "Unlocking a bucket after it has reached terminal state."); - OrderAccess::release_store(&_first, clear_state(first())); + Atomic::release_store(&_first, clear_state(first())); } template @@ -181,7 +181,7 @@ Bucket::redirect() { assert(is_locked(), "Must be locked."); - OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); + Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); } // InternalTable @@ -217,8 +217,8 @@ _cs_context(GlobalCounter::critical_section_begin(_thread)) { // This version is published now. - if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) { - OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL); + if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) { + Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL); } } @@ -289,13 +289,13 @@ assert(_resize_lock_owner == thread, "Re-size lock not held"); OrderAccess::fence(); // Prevent below load from floating up. // If no reader saw this version we can skip write_synchronize. - if (OrderAccess::load_acquire(&_invisible_epoch) == thread) { + if (Atomic::load_acquire(&_invisible_epoch) == thread) { return; } assert(_invisible_epoch == NULL, "Two thread doing bulk operations"); // We set this/next version that we are synchronizing for to not published. // A reader will zero this flag if it reads this/next version. 
- OrderAccess::release_store(&_invisible_epoch, thread); + Atomic::release_store(&_invisible_epoch, thread); GlobalCounter::write_synchronize(); } @@ -374,7 +374,7 @@ ConcurrentHashTable:: get_table() const { - return OrderAccess::load_acquire(&_table); + return Atomic::load_acquire(&_table); } template @@ -382,7 +382,7 @@ ConcurrentHashTable:: get_new_table() const { - return OrderAccess::load_acquire(&_new_table); + return Atomic::load_acquire(&_new_table); } template @@ -392,7 +392,7 @@ { InternalTable* old_table = _table; // Publish the new table. - OrderAccess::release_store(&_table, _new_table); + Atomic::release_store(&_table, _new_table); // All must see this. GlobalCounter::write_synchronize(); // _new_table not read any more. diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -74,7 +74,7 @@ // Returns false if all ranges are claimed. bool have_more_work() { - return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task; + return Atomic::load_acquire(&_next_to_claim) >= _stop_task; } void thread_owns_resize_lock(Thread* thread) { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/globalCounter.cpp --- a/src/hotspot/share/utilities/globalCounter.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/globalCounter.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -41,7 +41,7 @@ SpinYield yield; // Loops on this thread until it has exited the critical read section. while(true) { - uintx cnt = OrderAccess::load_acquire(thread->get_rcu_counter()); + uintx cnt = Atomic::load_acquire(thread->get_rcu_counter()); // This checks if the thread's counter is active. And if so is the counter // for a pre-existing reader (belongs to this grace period). A pre-existing // reader will have a lower counter than the global counter version for this diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/globalCounter.inline.hpp --- a/src/hotspot/share/utilities/globalCounter.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/globalCounter.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -40,7 +40,7 @@ if ((new_cnt & COUNTER_ACTIVE) == 0) { new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE; } - OrderAccess::release_store_fence(thread->get_rcu_counter(), new_cnt); + Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt); return static_cast(old_cnt); } @@ -49,8 +49,8 @@ assert(thread == Thread::current(), "must be current thread"); assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section"); // Restore the counter value from before the associated begin. - OrderAccess::release_store(thread->get_rcu_counter(), - static_cast(context)); + Atomic::release_store(thread->get_rcu_counter(), + static_cast(context)); } class GlobalCounter::CriticalSection { diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/hashtable.inline.hpp --- a/src/hotspot/share/utilities/hashtable.inline.hpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/hashtable.inline.hpp Mon Nov 25 12:22:13 2019 +0100 @@ -88,7 +88,7 @@ // SystemDictionary are read without locks. The new entry must be // complete before other threads can be allowed to see it // via a store to _buckets[index]. 
- OrderAccess::release_store(&_entry, l); + Atomic::release_store(&_entry, l); } @@ -97,7 +97,7 @@ // SystemDictionary are read without locks. The new entry must be // complete before other threads can be allowed to see it // via a store to _buckets[index]. - return OrderAccess::load_acquire(&_entry); + return Atomic::load_acquire(&_entry); } diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/utilities/singleWriterSynchronizer.cpp --- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -85,7 +85,7 @@ // to complete, e.g. for the value of old_ptr to catch up with old. // Loop because there could be pending wakeups unrelated to this // synchronize request. - while (old != OrderAccess::load_acquire(old_ptr)) { + while (old != Atomic::load_acquire(old_ptr)) { _wakeup.wait(); } // (5) Drain any pending wakeups. A critical section exit may have diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp --- a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -27,7 +27,6 @@ #include "memory/allocation.hpp" #include "runtime/atomic.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/semaphore.inline.hpp" #include "runtime/thread.hpp" #include "utilities/debug.hpp" @@ -108,7 +107,7 @@ {} virtual void main_run() { - while (OrderAccess::load_acquire(_continue_running)) { + while (Atomic::load_acquire(_continue_running)) { uint id = _set->claim_par_id(); _set->release_par_id(id); ++_allocations; @@ -147,7 +146,7 @@ ThreadInVMfromNative invm(this_thread); this_thread->sleep(milliseconds_to_run); } - OrderAccess::release_store(&continue_running, false); + Atomic::release_store(&continue_running, false); for (uint i = 0; i < nthreads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp --- a/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -26,7 +26,7 @@ #include "gc/shared/ptrQueue.hpp" #include "memory/allocation.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/atomic.hpp" #include "runtime/semaphore.inline.hpp" #include "runtime/thread.hpp" #include "utilities/globalCounter.inline.hpp" @@ -150,7 +150,7 @@ {} virtual void main_run() { - while (OrderAccess::load_acquire(_continue_running)) { + while (Atomic::load_acquire(_continue_running)) { BufferNode* node = _allocator->allocate(); _cbl->push(node); ++_allocations; @@ -184,7 +184,7 @@ BufferNode* node = _cbl->pop(); if (node != NULL) { _allocator->release(node); - } else if (!OrderAccess::load_acquire(_continue_running)) { + } else if (!Atomic::load_acquire(_continue_running)) { return; } ThreadBlockInVM tbiv(this); // Safepoint check. 
@@ -226,12 +226,12 @@ ThreadInVMfromNative invm(this_thread); this_thread->sleep(milliseconds_to_run); } - OrderAccess::release_store(&allocator_running, false); + Atomic::release_store(&allocator_running, false); for (uint i = 0; i < nthreads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); } - OrderAccess::release_store(&processor_running, false); + Atomic::release_store(&processor_running, false); for (uint i = 0; i < nthreads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/utilities/test_globalCounter.cpp --- a/test/hotspot/gtest/utilities/test_globalCounter.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/utilities/test_globalCounter.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -23,7 +23,6 @@ #include "precompiled.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/globalCounter.hpp" #include "utilities/globalCounter.inline.hpp" @@ -48,14 +47,14 @@ _wrt_start->signal(); while (!_exit) { GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(this); - volatile TestData* test = OrderAccess::load_acquire(_test); - long value = OrderAccess::load_acquire(&test->test_value); + volatile TestData* test = Atomic::load_acquire(_test); + long value = Atomic::load_acquire(&test->test_value); ASSERT_EQ(value, GOOD_VALUE); GlobalCounter::critical_section_end(this, cs_context); { GlobalCounter::CriticalSection cs(this); - volatile TestData* test = OrderAccess::load_acquire(_test); - long value = OrderAccess::load_acquire(&test->test_value); + volatile TestData* test = Atomic::load_acquire(_test); + long value = Atomic::load_acquire(&test->test_value); ASSERT_EQ(value, GOOD_VALUE); } } @@ -82,7 +81,7 @@ TestData* tmp = new TestData(); tmp->test_value = GOOD_VALUE; - OrderAccess::release_store_fence(&test, tmp); + Atomic::release_store_fence(&test, tmp); reader1->doit(); reader2->doit(); @@ -99,7 +98,7 @@ volatile TestData* free_tmp = test; tmp = new TestData(); tmp->test_value = GOOD_VALUE; - OrderAccess::release_store(&test, tmp); + Atomic::release_store(&test, tmp); GlobalCounter::write_synchronize(); free_tmp->test_value = BAD_VALUE; delete free_tmp; diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/utilities/test_globalCounter_nested.cpp --- a/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "metaprogramming/isRegisteredEnum.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/globalCounter.hpp" #include "utilities/globalCounter.inline.hpp" @@ -57,21 +56,21 @@ ~RCUNestedThread() {} void set_state(NestedTestState new_state) { - OrderAccess::release_store(&_state, new_state); + Atomic::release_store(&_state, new_state); } void wait_with_state(NestedTestState new_state) { SpinYield spinner; - OrderAccess::release_store(&_state, new_state); - while (!OrderAccess::load_acquire(&_proceed)) { + Atomic::release_store(&_state, new_state); + while (!Atomic::load_acquire(&_proceed)) { spinner.wait(); } - OrderAccess::release_store(&_proceed, false); + Atomic::release_store(&_proceed, false); } public: NestedTestState state() const { - return OrderAccess::load_acquire(&_state); + return Atomic::load_acquire(&_state); } 
void wait_for_state(NestedTestState goal) { @@ -82,7 +81,7 @@ } void proceed() { - OrderAccess::release_store(&_proceed, true); + Atomic::release_store(&_proceed, true); } }; diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/utilities/test_lockFreeStack.cpp --- a/test/hotspot/gtest/utilities/test_lockFreeStack.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/utilities/test_lockFreeStack.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/lockFreeStack.hpp" #include "threadHelper.inline.hpp" @@ -226,21 +225,21 @@ {} virtual void main_run() { - OrderAccess::release_store_fence(&_ready, true); + Atomic::release_store_fence(&_ready, true); while (true) { Element* e = _from->pop(); if (e != NULL) { _to->push(*e); Atomic::inc(_processed); ++_local_processed; - } else if (OrderAccess::load_acquire(_processed) == _process_limit) { + } else if (Atomic::load_acquire(_processed) == _process_limit) { tty->print_cr("thread %u processed " SIZE_FORMAT, _id, _local_processed); return; } } } - bool ready() const { return OrderAccess::load_acquire(&_ready); } + bool ready() const { return Atomic::load_acquire(&_ready); } }; TEST_VM(LockFreeStackTest, stress) { diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp --- a/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" #include "utilities/debug.hpp" @@ -56,14 +56,14 @@ virtual void main_run() { size_t iterations = 0; size_t values_changed = 0; - while (OrderAccess::load_acquire(_continue_running) != 0) { + while (Atomic::load_acquire(_continue_running) != 0) { { ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section. ++iterations; SingleWriterSynchronizer::CriticalSection cs(_synchronizer); - uintx value = OrderAccess::load_acquire(_synchronized_value); + uintx value = Atomic::load_acquire(_synchronized_value); uintx new_value = value; for (uint i = 0; i < reader_iterations; ++i) { - new_value = OrderAccess::load_acquire(_synchronized_value); + new_value = Atomic::load_acquire(_synchronized_value); // A reader can see either the value it first read after // entering the critical section, or that value + 1. No other // values are possible. @@ -97,7 +97,7 @@ {} virtual void main_run() { - while (OrderAccess::load_acquire(_continue_running) != 0) { + while (Atomic::load_acquire(_continue_running) != 0) { ++*_synchronized_value; _synchronizer->synchronize(); { ThreadBlockInVM tbiv(this); } // Safepoint check. diff -r fcad92f425c5 -r 56bf71d64d51 test/hotspot/gtest/utilities/test_waitBarrier.cpp --- a/test/hotspot/gtest/utilities/test_waitBarrier.cpp Mon Nov 25 14:06:13 2019 +0100 +++ b/test/hotspot/gtest/utilities/test_waitBarrier.cpp Mon Nov 25 12:22:13 2019 +0100 @@ -49,9 +49,9 @@ // Similar to how a JavaThread would stop in a safepoint. while (!_exit) { // Load the published tag. - tag = OrderAccess::load_acquire(&wait_tag); + tag = Atomic::load_acquire(&wait_tag); // Publish the tag this thread is going to wait for. 
- OrderAccess::release_store(&_on_barrier, tag); + Atomic::release_store(&_on_barrier, tag); if (_on_barrier == 0) { SpinPause(); continue; @@ -60,9 +60,9 @@ // Wait until we are woken. _wait_barrier->wait(tag); // Verify that we do not see an invalid value. - vv = OrderAccess::load_acquire(&valid_value); + vv = Atomic::load_acquire(&valid_value); ASSERT_EQ((vv & 0x1), 0); - OrderAccess::release_store(&_on_barrier, 0); + Atomic::release_store(&_on_barrier, 0); } } }; @@ -104,7 +104,7 @@ // Arm next tag. wb.arm(next_tag); // Publish tag. - OrderAccess::release_store_fence(&wait_tag, next_tag); + Atomic::release_store_fence(&wait_tag, next_tag); // Wait until threads picked up new tag. while (reader1->_on_barrier != wait_tag || @@ -115,12 +115,12 @@ } // Set an invalid value. - OrderAccess::release_store(&valid_value, valid_value + 1); // odd + Atomic::release_store(&valid_value, valid_value + 1); // odd os::naked_yield(); // Set a valid value. - OrderAccess::release_store(&valid_value, valid_value + 1); // even + Atomic::release_store(&valid_value, valid_value + 1); // even // Publish inactive tag. - OrderAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up. + Atomic::release_store_fence(&wait_tag, 0); // Stores in WB must not float up. wb.disarm(); // Wait until threads done valid_value verification.