src/hotspot/share/runtime/atomic.hpp
changeset 59247 56bf71d64d51
parent 53244 9807daeb47c4
child 59248 e92153ed8bdc
59246:fcad92f425c5 59247:56bf71d64d51
    32 #include "metaprogramming/isPointer.hpp"
    33 #include "metaprogramming/isSame.hpp"
    34 #include "metaprogramming/primitiveConversions.hpp"
    35 #include "metaprogramming/removeCV.hpp"
    36 #include "metaprogramming/removePointer.hpp"
    37 #include "runtime/orderAccess.hpp"
    38 #include "utilities/align.hpp"
    39 #include "utilities/macros.hpp"
    40 
    41 enum atomic_memory_order {
    42   // The modes that align with C++11 are intended to
    47   memory_order_acq_rel = 4,
    48   // Strong two-way memory barrier.
    49   memory_order_conservative = 8
    50 };
    51 
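(Editorial note, not part of the changeset: a minimal sketch of how these ordering constants are passed to the Atomic API. The counter field and helper functions are hypothetical, and it assumes the elided enumerators such as memory_order_relaxed and the memory_order_conservative default used by the declarations elsewhere in this header.)

// Illustrative sketch only; _request_count, bump_relaxed and bump_default are hypothetical.
static volatile int _request_count = 0;

inline void bump_relaxed() {
  // Explicit relaxed ordering: atomicity only, no extra fencing.
  Atomic::add(1, &_request_count, memory_order_relaxed);
}

inline void bump_default() {
  // Default ordering (assumed memory_order_conservative): full barrier semantics.
  Atomic::add(1, &_request_count);
}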
       
    52 enum ScopedFenceType {
    53     X_ACQUIRE
    54   , RELEASE_X
    55   , RELEASE_X_FENCE
    56 };
    57 
    58 class Atomic : AllStatic {
    59 public:
    60   // Atomic operations on int64 types are not available on all 32-bit
    61   // platforms. If atomic ops on int64 are defined here they must only
    62   // be used from code that verifies they are available at runtime and
    80   // to D, an integral/enum type equal to D, or a type equal to D that
    81   // is primitive convertible using PrimitiveConversions.
    82   template<typename T, typename D>
    83   inline static void store(T store_value, volatile D* dest);
    84 
    85   template <typename T, typename D>
    86   inline static void release_store(volatile D* dest, T store_value);
    87 
    88   template <typename T, typename D>
    89   inline static void release_store_fence(volatile D* dest, T store_value);
    90 
    91   // Atomically load from a location
    92   // The type T must be either a pointer type, an integral/enum type,
    93   // or a type that is primitive convertible using PrimitiveConversions.
    94   template<typename T>
    95   inline static T load(const volatile T* dest);
    96 
    97   template <typename T>
    98   inline static T load_acquire(const volatile T* dest);
    99 
   100   // Atomically add to a location. Returns updated value. add*() provide:
   101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
   102 
   103   template<typename I, typename D>
   213   // must be a valid expression, returning a result convertible to T.
   214   //
   215   // The default implementation is a volatile load. If a platform
   216   // requires more for e.g. 64 bit loads, a specialization is required
   217   template<size_t byte_size> struct PlatformLoad;
   218 
   219   // Give platforms a variation point to specialize.
   220   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
   221   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
   222 
   223 private:
   224   // Dispatch handler for add.  Provides type-based validity checking
   225   // and limited conversions around calls to the platform-specific
   226   // implementation layer provided by PlatformAdd.
   596   T operator()(T exchange_value,
   597                T volatile* dest,
   598                atomic_memory_order order) const;
   599 };
   600 
   601 template <ScopedFenceType T>
   602 class ScopedFenceGeneral: public StackObj {
   603  public:
   604   void prefix() {}
   605   void postfix() {}
   606 };
   607 
   608 // The following methods can be specialized using simple template specialization
   609 // in the platform specific files for optimization purposes. Otherwise the
   610 // generalized variant is used.
   611 
   612 template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
   613 template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
   614 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
   615 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
   616 
   617 template <ScopedFenceType T>
   618 class ScopedFence : public ScopedFenceGeneral<T> {
   619   void *const _field;
   620  public:
   621   ScopedFence(void *const field) : _field(field) { prefix(); }
   622   ~ScopedFence() { postfix(); }
   623   void prefix() { ScopedFenceGeneral<T>::prefix(); }
   624   void postfix() { ScopedFenceGeneral<T>::postfix(); }
   625 };
   626 
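(Editorial note, not part of the changeset: a minimal sketch of what the generic ScopedFence path amounts to for each ScopedFenceType, given the specializations above. The *_sketch helpers are hypothetical names introduced only for illustration.)

// X_ACQUIRE: no prefix, acquire after the access.
template <typename T>
inline T x_acquire_sketch(const volatile T* p) {
  T v = Atomic::load(p);
  OrderAccess::acquire();
  return v;
}

// RELEASE_X: release before the access, no postfix.
template <typename T, typename D>
inline void release_x_sketch(volatile D* p, T v) {
  OrderAccess::release();
  Atomic::store(v, p);
}

// RELEASE_X_FENCE: release before the access, full fence after it.
template <typename T, typename D>
inline void release_x_fence_sketch(volatile D* p, T v) {
  OrderAccess::release();
  Atomic::store(v, p);
  OrderAccess::fence();
}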
   627 // platform specific in-line definitions - must come before shared definitions
   628 
   629 #include OS_CPU_HEADER(atomic)
   630 
   631 // shared in-line definitions
   638 template<typename T>
   639 inline T Atomic::load(const volatile T* dest) {
   640   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
   641 }
   642 
   643 template<size_t byte_size, ScopedFenceType type>
   644 struct Atomic::PlatformOrderedLoad {
   645   template <typename T>
   646   T operator()(const volatile T* p) const {
   647     ScopedFence<type> f((void*)p);
   648     return Atomic::load(p);
   649   }
   650 };
   651 
   652 template <typename T>
   653 inline T Atomic::load_acquire(const volatile T* p) {
   654   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
   655 }
   656 
   657 template<typename T, typename D>
   658 inline void Atomic::store(T store_value, volatile D* dest) {
   659   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
   660 }
   661 
   662 template<size_t byte_size, ScopedFenceType type>
   663 struct Atomic::PlatformOrderedStore {
   664   template <typename T>
   665   void operator()(T v, volatile T* p) const {
   666     ScopedFence<type> f((void*)p);
   667     Atomic::store(v, p);
   668   }
   669 };
   670 
   671 template <typename T, typename D>
   672 inline void Atomic::release_store(volatile D* p, T v) {
   673   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
   674 }
   675 
   676 template <typename T, typename D>
   677 inline void Atomic::release_store_fence(volatile D* p, T v) {
   678   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
   679 }
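(Editorial note, not part of the changeset: a minimal usage sketch of the release_store / load_acquire pairing defined above, the classic publication pattern. PublishedThing, _published, publisher and consumer are hypothetical names.)

struct PublishedThing { int _payload; };

static PublishedThing* volatile _published = NULL;

inline void publisher(PublishedThing* t) {
  t->_payload = 42;                       // ordinary store to the payload
  Atomic::release_store(&_published, t);  // payload is visible before the pointer
}

inline void consumer() {
  PublishedThing* t = Atomic::load_acquire(&_published);
  if (t != NULL) {
    int v = t->_payload;                  // observes 42 if the pointer was seen
    (void)v;
  }
}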
   680 
   681 template<typename I, typename D>
   682 inline D Atomic::add(I add_value, D volatile* dest,
   683                      atomic_memory_order order) {