diff -r fcad92f425c5 -r 56bf71d64d51 src/hotspot/share/runtime/atomic.hpp
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 14:06:13 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:22:13 2019 +0100
@@ -34,6 +34,7 @@
 #include "metaprogramming/primitiveConversions.hpp"
 #include "metaprogramming/removeCV.hpp"
 #include "metaprogramming/removePointer.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -48,6 +49,12 @@
   memory_order_conservative = 8
 };
 
+enum ScopedFenceType {
+    X_ACQUIRE
+  , RELEASE_X
+  , RELEASE_X_FENCE
+};
+
 class Atomic : AllStatic {
 public:
   // Atomic operations on int64 types are not available on all 32-bit
@@ -75,12 +82,21 @@
   template<typename T, typename D>
   inline static void store(T store_value, volatile D* dest);
 
+  template <typename D, typename T>
+  inline static void release_store(volatile D* dest, T store_value);
+
+  template <typename D, typename T>
+  inline static void release_store_fence(volatile D* dest, T store_value);
+
   // Atomically load from a location
   // The type T must be either a pointer type, an integral/enum type,
   // or a type that is primitive convertible using PrimitiveConversions.
   template<typename T>
   inline static T load(const volatile T* dest);
 
+  template <typename T>
+  inline static T load_acquire(const volatile T* dest);
+
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
@@ -200,6 +216,10 @@
   // requires more for e.g. 64 bit loads, a specialization is required
   template<size_t byte_size> struct PlatformLoad;
 
+  // Give platforms a variation point to specialize.
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
+  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
+
 private:
   // Dispatch handler for add. Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
@@ -578,6 +598,32 @@
                        atomic_memory_order order) const;
 };
 
+template <ScopedFenceType T>
+class ScopedFenceGeneral: public StackObj {
+ public:
+  void prefix() {}
+  void postfix() {}
+};
+
+// The following methods can be specialized using simple template specialization
+// in the platform specific files for optimization purposes. Otherwise the
+// generalized variant is used.
+
+template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
+
+template <ScopedFenceType T>
+class ScopedFence : public ScopedFenceGeneral<T> {
+  void *const _field;
+ public:
+  ScopedFence(void *const field) : _field(field) { prefix(); }
+  ~ScopedFence() { postfix(); }
+  void prefix() { ScopedFenceGeneral<T>::prefix(); }
+  void postfix() { ScopedFenceGeneral<T>::postfix(); }
+};
+
 // platform specific in-line definitions - must come before shared definitions
 #include OS_CPU_HEADER(atomic)
 
@@ -594,11 +640,44 @@
   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 }
 
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedLoad {
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    ScopedFence<type> f((void*)p);
+    return Atomic::load(p);
+  }
+};
+
+template <typename T>
+inline T Atomic::load_acquire(const volatile T* p) {
+  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
+}
+
 template<typename T, typename D>
 inline void Atomic::store(T store_value, volatile D* dest) {
   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 }
 
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedStore {
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    ScopedFence<type> f((void*)p);
+    Atomic::store(v, p);
+  }
+};
+
+template <typename D, typename T>
+inline void Atomic::release_store(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+}
+
+template <typename D, typename T>
+inline void Atomic::release_store_fence(volatile D* p, T v) {
+  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+}
+
 template<typename I, typename D>
 inline D Atomic::add(I add_value, D volatile* dest,
                      atomic_memory_order order) {
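For context, a minimal usage sketch (not part of the patch) of the ordered accessors introduced above: a writer publishes a payload with Atomic::release_store and a reader observes it with Atomic::load_acquire. The class HypotheticalFlag and its fields are invented for illustration; the sketch assumes it is compiled inside HotSpot, where runtime/atomic.hpp is available, and uses only the declarations added in this change.

// Usage sketch only -- not part of this changeset.
// HypotheticalFlag is an illustrative example type.
#include "runtime/atomic.hpp"

class HypotheticalFlag {
  void* volatile _payload;  // data published by the writer
  volatile int   _ready;    // flag observed by the reader

public:
  HypotheticalFlag() : _payload(NULL), _ready(0) {}

  void publish(void* p) {
    Atomic::store(p, &_payload);        // plain store of the payload
    Atomic::release_store(&_ready, 1);  // release: payload is visible no later than the flag
  }

  void* poll() {
    if (Atomic::load_acquire(&_ready) != 0) {  // acquire: flag is read before the payload
      return Atomic::load(&_payload);
    }
    return NULL;
  }
};

The ScopedFence RAII helper is what makes the generalized variants work: PlatformOrderedLoad/PlatformOrderedStore bracket the plain Atomic::load/Atomic::store with prefix()/postfix() (acquire after the load, release before the store, plus a trailing fence for RELEASE_X_FENCE), and platforms with cheaper ordered instructions can specialize those structs in their os_cpu headers instead of paying for a full OrderAccess barrier.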