8234736: Harmonize parameter order in Atomic - store
author: stefank
Mon, 25 Nov 2019 12:30:24 +0100
changeset 59248 e92153ed8bdc
parent 59247 56bf71d64d51
child 59249 29b0d0b61615
8234736: Harmonize parameter order in Atomic - store Reviewed-by: rehn, dholmes
src/hotspot/os/bsd/os_bsd.cpp
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
src/hotspot/share/classfile/symbolTable.cpp
src/hotspot/share/code/compiledMethod.cpp
src/hotspot/share/code/dependencyContext.cpp
src/hotspot/share/code/nmethod.cpp
src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp
src/hotspot/share/gc/shared/satbMarkQueue.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
src/hotspot/share/gc/z/zForwarding.inline.hpp
src/hotspot/share/gc/z/zLock.inline.hpp
src/hotspot/share/gc/z/zMark.cpp
src/hotspot/share/gc/z/zNMethod.cpp
src/hotspot/share/memory/allocation.inline.hpp
src/hotspot/share/oops/accessBackend.inline.hpp
src/hotspot/share/oops/klass.cpp
src/hotspot/share/oops/methodData.hpp
src/hotspot/share/oops/oop.inline.hpp
src/hotspot/share/prims/jni.cpp
src/hotspot/share/runtime/atomic.hpp
src/hotspot/share/runtime/basicLock.hpp
src/hotspot/share/runtime/objectMonitor.inline.hpp
src/hotspot/share/services/attachListener.hpp
src/hotspot/share/utilities/lockFreeStack.hpp
src/hotspot/share/utilities/vmError.cpp
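
This changeset flips the argument order of Atomic::store and its dispatch layers (StoreImpl, PlatformStore, PlatformOrderedStore, release_store, release_store_fence) from (new_value, dest) to (dest, new_value), so stores name the destination first, matching Atomic::load(dest). A minimal before/after sketch of the calling convention, using a hypothetical flag field that is not part of the patch:

    // Hypothetical example for illustration only.
    static volatile bool _flag = false;

    // Old order, before this changeset: value first, destination second.
    //   Atomic::store(true, &_flag);

    // New order, after this changeset: destination first, value second,
    // mirroring Atomic::load(&_flag).
    Atomic::store(&_flag, true);
    bool v = Atomic::load(&_flag);
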
--- a/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -3264,7 +3264,7 @@
 
   while (processor_id < 0) {
     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
-      Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
+      Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1);
     }
     processor_id = Atomic::load(&mapping[apic_id]);
   }
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -161,8 +161,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgb (%2),%0"
                       : "=q" (v)
                       : "0" (v), "r" (p)
@@ -185,7 +185,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgw (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -197,7 +197,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgl (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -210,7 +210,7 @@
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgq (%2), %0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -276,8 +276,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -88,14 +88,14 @@
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
 };
 
 template<size_t byte_size>
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
 };
 
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -54,8 +54,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -161,8 +161,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgb (%2),%0"
                       : "=q" (v)
                       : "0" (v), "r" (p)
@@ -185,7 +185,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgw (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -197,7 +197,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgl (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -210,7 +210,7 @@
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgq (%2), %0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -122,8 +122,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -213,8 +213,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   volatile T* src = &store_value;
   __asm {
@@ -234,7 +234,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov al, v;
@@ -247,7 +247,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov ax, v;
@@ -260,7 +260,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov eax, v;
--- a/src/hotspot/share/classfile/symbolTable.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -189,8 +189,8 @@
   }
 }
 
-void SymbolTable::reset_has_items_to_clean() { Atomic::store(false, &_has_items_to_clean); }
-void SymbolTable::mark_has_items_to_clean()  { Atomic::store(true, &_has_items_to_clean); }
+void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
+void SymbolTable::mark_has_items_to_clean()  { Atomic::store(&_has_items_to_clean, true); }
 bool SymbolTable::has_items_to_clean()       { return Atomic::load(&_has_items_to_clean); }
 
 void SymbolTable::item_added() {
--- a/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -615,7 +615,7 @@
       if (md != NULL && md->is_method()) {
         Method* method = static_cast<Method*>(md);
         if (!method->method_holder()->is_loader_alive()) {
-          Atomic::store((Method*)NULL, r->metadata_addr());
+          Atomic::store(r->metadata_addr(), (Method*)NULL);
 
           if (!r->metadata_is_immediate()) {
             r->fix_metadata_relocation();
--- a/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -300,7 +300,7 @@
 
 // Relaxed accessors
 void DependencyContext::set_dependencies(nmethodBucket* b) {
-  Atomic::store(b, _dependency_context_addr);
+  Atomic::store(_dependency_context_addr, b);
 }
 
 nmethodBucket* DependencyContext::dependencies() {
@@ -313,7 +313,7 @@
 void DependencyContext::cleaning_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be");
   uint64_t epoch = ++_cleaning_epoch_monotonic;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // The epilogue marks the end of dependency context cleanup by the GC,
@@ -323,7 +323,7 @@
 // was called. That allows dependency contexts to be cleaned concurrently.
 void DependencyContext::cleaning_end() {
   uint64_t epoch = 0;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // This function skips over nmethodBuckets in the list corresponding to
@@ -358,7 +358,7 @@
 }
 
 void nmethodBucket::set_next(nmethodBucket* b) {
-  Atomic::store(b, &_next);
+  Atomic::store(&_next, b);
 }
 
 nmethodBucket* nmethodBucket::purge_list_next() {
@@ -366,5 +366,5 @@
 }
 
 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
-  Atomic::store(b, &_purge_list_next);
+  Atomic::store(&_purge_list_next, b);
 }
--- a/src/hotspot/share/code/nmethod.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -315,7 +315,7 @@
 }
 
 void ExceptionCache::set_next(ExceptionCache *ec) {
-  Atomic::store(ec, &_next);
+  Atomic::store(&_next, ec);
 }
 
 //-----------------------------------------------------------------------------
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -55,7 +55,7 @@
 }
 
 void G1BlockOffsetTable::set_offset_array_raw(size_t index, u_char offset) {
-  Atomic::store(offset, &_offset_array[index]);
+  Atomic::store(&_offset_array[index], offset);
 }
 
 void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -329,7 +329,7 @@
 #endif // PRODUCT
 
 void SATBMarkQueueSet::abandon_completed_buffers() {
-  Atomic::store(size_t(0), &_count_and_process_flag);
+  Atomic::store(&_count_and_process_flag, size_t(0));
   BufferNode* buffers_to_delete = _list.pop_all();
   while (buffers_to_delete != NULL) {
     BufferNode* bn = buffers_to_delete;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -305,7 +305,7 @@
 }
 
 void ShenandoahHeapRegion::clear_live_data() {
-  Atomic::release_store_fence<size_t>(&_live_data, 0);
+  Atomic::release_store_fence(&_live_data, (size_t)0);
 }
 
 void ShenandoahHeapRegion::reset_alloc_metadata() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -178,12 +178,12 @@
 size_t ShenandoahPacer::update_and_get_progress_history() {
   if (_progress == -1) {
     // First initialization, report some prior
-    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
     return (size_t) (_heap->max_capacity() * 0.1);
   } else {
     // Record history, and reply historical data
     _progress_history->add(_progress);
-    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
     return (size_t) (_progress_history->avg() * HeapWordSize);
   }
 }
@@ -192,7 +192,7 @@
   size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
   Atomic::xchg((intptr_t)initial, &_budget);
-  Atomic::store(tax_rate, &_tax_rate);
+  Atomic::store(&_tax_rate, tax_rate);
   Atomic::inc(&_epoch);
 }
 
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -54,7 +54,7 @@
 }
 
 inline void ZForwarding::set_pinned() {
-  Atomic::store(true, &_pinned);
+  Atomic::store(&_pinned, true);
 }
 
 inline bool ZForwarding::inc_refcount() {
--- a/src/hotspot/share/gc/z/zLock.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zLock.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -53,7 +53,7 @@
 
   if (owner != thread) {
     _lock.lock();
-    Atomic::store(thread, &_owner);
+    Atomic::store(&_owner, thread);
   }
 
   _count++;
@@ -66,7 +66,7 @@
   _count--;
 
   if (_count == 0) {
-    Atomic::store((Thread*)NULL, &_owner);
+    Atomic::store(&_owner, (Thread*)NULL);
     _lock.unlock();
   }
 }
--- a/src/hotspot/share/gc/z/zMark.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zMark.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -487,7 +487,7 @@
       // Flush before termination
       if (!try_flush(&_work_nterminateflush)) {
         // No more work available, skip further flush attempts
-        Atomic::store(false, &_work_terminateflush);
+        Atomic::store(&_work_terminateflush, false);
       }
 
       // Don't terminate, regardless of whether we successfully
--- a/src/hotspot/share/gc/z/zNMethod.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/gc/z/zNMethod.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -258,7 +258,7 @@
   volatile bool _failed;
 
   void set_failed() {
-    Atomic::store(true, &_failed);
+    Atomic::store(&_failed, true);
   }
 
   void unlink(nmethod* nm) {
--- a/src/hotspot/share/memory/allocation.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/memory/allocation.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -41,7 +41,7 @@
   *dest += add_value;
 #else
   julong value = Atomic::load(dest);
-  Atomic::store(value + add_value, dest);
+  Atomic::store(dest, value + add_value);
 #endif
 }
 #endif
--- a/src/hotspot/share/oops/accessBackend.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -174,7 +174,7 @@
 inline typename EnableIf<
   HasDecorator<ds, MO_RELAXED>::value>::type
 RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
-  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
+  Atomic::store(reinterpret_cast<volatile T*>(addr), value);
 }
 
 template <DecoratorSet decorators>
--- a/src/hotspot/share/oops/klass.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/oops/klass.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -410,7 +410,7 @@
   // Does not need release semantics. If used by cleanup, it will link to
   // already safely published data, and if used by inserts, will be published
   // safely using cmpxchg.
-  Atomic::store(s, &_next_sibling);
+  Atomic::store(&_next_sibling, s);
 }
 
 void Klass::append_to_sibling_list() {
--- a/src/hotspot/share/oops/methodData.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/oops/methodData.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -2244,7 +2244,7 @@
     _rtm_state = (int)rstate;
   }
   void atomic_set_rtm_state(RTMState rstate) {
-    Atomic::store((int)rstate, &_rtm_state);
+    Atomic::store(&_rtm_state, (int)rstate);
   }
 
   static int rtm_state_offset_in_bytes() {
--- a/src/hotspot/share/oops/oop.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -61,7 +61,7 @@
 }
 
 void oopDesc::set_mark_raw(markWord m) {
-  Atomic::store(m, &_mark);
+  Atomic::store(&_mark, m);
 }
 
 void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
--- a/src/hotspot/share/prims/jni.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/prims/jni.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -3689,7 +3689,7 @@
   intptr_t *a = (intptr_t *) jni_functions();
   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
   for (uint i=0; i <  sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
-    Atomic::store(*b++, a++);
+    Atomic::store(a++, *b++);
   }
 }
 
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -79,13 +79,13 @@
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static void store(T store_value, volatile D* dest);
+  template<typename D, typename T>
+  inline static void store(volatile D* dest, T store_value);
 
-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store(volatile D* dest, T store_value);
 
-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store_fence(volatile D* dest, T store_value);
 
   // Atomically load from a location
@@ -168,7 +168,7 @@
   // Dispatch handler for store.  Provides type-based validity
   // checking and limited conversions around calls to the platform-
   // specific implementation layer provided by PlatformOp.
-  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  template<typename D, typename T, typename PlatformOp, typename Enable = void>
   struct StoreImpl;
 
   // Platform-specific implementation of store.  Support for sizes
@@ -450,9 +450,9 @@
   PlatformOp,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     // Forward to the platform handler for the size of T.
-    PlatformOp()(new_value, dest);
+    PlatformOp()(dest, new_value);
   }
 };
 
@@ -461,16 +461,16 @@
 // The new_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
 struct Atomic::StoreImpl<
-  T*, D*,
+  D*, T*,
   PlatformOp,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  void operator()(T* new_value, D* volatile* dest) const {
+  void operator()(D* volatile* dest, T* new_value) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* value = new_value;
-    PlatformOp()(value, dest);
+    PlatformOp()(dest, value);
   }
 };
 
@@ -486,12 +486,12 @@
   PlatformOp,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
-    PlatformOp()(Translator::decay(new_value),
-                 reinterpret_cast<Decayed volatile*>(dest));
+    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+                 Translator::decay(new_value));
   }
 };
 
@@ -504,8 +504,8 @@
 template<size_t byte_size>
 struct Atomic::PlatformStore {
   template<typename T>
-  void operator()(T new_value,
-                  T volatile* dest) const {
+  void operator()(T volatile* dest,
+                  T new_value) const {
     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
     (void)const_cast<T&>(*dest = new_value);
   }
@@ -654,28 +654,28 @@
   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }
 
-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
-  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 }
 
 template<size_t byte_size, ScopedFenceType type>
 struct Atomic::PlatformOrderedStore {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     ScopedFence<type> f((void*)p);
-    Atomic::store(v, p);
+    Atomic::store(p, v);
   }
 };
 
-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 }
 
-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
 template<typename I, typename D>
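
With the atomic.hpp dispatch layer reordered above, the ordered store variants follow the same convention: the destination pointer comes first for release_store and release_store_fence as well. A small sketch using a hypothetical state field, not taken from the patch:

    // Hypothetical example for illustration only.
    static volatile int _state = 0;

    Atomic::release_store(&_state, 1);        // store with release semantics, dest first
    Atomic::release_store_fence(&_state, 2);  // release store followed by a full fence
    int observed = Atomic::load(&_state);     // plain atomic load, dest first
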
--- a/src/hotspot/share/runtime/basicLock.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/runtime/basicLock.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -40,7 +40,7 @@
   }
 
   void set_displaced_header(markWord header) {
-    Atomic::store(header, &_displaced_header);
+    Atomic::store(&_displaced_header, header);
   }
 
   void print_on(outputStream* st) const;
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -44,7 +44,7 @@
 }
 
 inline void ObjectMonitor::set_header(markWord hdr) {
-  Atomic::store(hdr, &_header);
+  Atomic::store(&_header, hdr);
 }
 
 inline jint ObjectMonitor::waiters() const {
@@ -63,7 +63,7 @@
   assert(_object != NULL, "must be non-NULL");
   assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
 
-  Atomic::store(markWord::zero(), &_header);
+  Atomic::store(&_header, markWord::zero());
   _object = NULL;
 }
 
--- a/src/hotspot/share/services/attachListener.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/services/attachListener.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -86,7 +86,7 @@
 
  public:
   static void set_state(AttachListenerState new_state) {
-    Atomic::store(new_state, &_state);
+    Atomic::store(&_state, new_state);
   }
 
   static AttachListenerState get_state() {
@@ -103,7 +103,7 @@
   }
 
   static void set_initialized() {
-    Atomic::store(AL_INITIALIZED, &_state);
+    Atomic::store(&_state, AL_INITIALIZED);
   }
 
   // indicates if this VM supports attach-on-demand
--- a/src/hotspot/share/utilities/lockFreeStack.hpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp	Mon Nov 25 12:30:24 2019 +0100
@@ -170,7 +170,7 @@
   // if value is in an instance of this specialization of LockFreeStack,
   // there must be no concurrent push or pop operations on that stack.
   static void set_next(T& value, T* new_next) {
-    Atomic::store(new_next, next_ptr(value));
+    Atomic::store(next_ptr(value), new_next);
   }
 };
 
--- a/src/hotspot/share/utilities/vmError.cpp	Mon Nov 25 12:22:13 2019 +0100
+++ b/src/hotspot/share/utilities/vmError.cpp	Mon Nov 25 12:30:24 2019 +0100
@@ -399,7 +399,7 @@
 
 void VMError::record_reporting_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_reporting_start_time);
+  Atomic::store(&_reporting_start_time, now);
 }
 
 jlong VMError::get_reporting_start_time() {
@@ -408,7 +408,7 @@
 
 void VMError::record_step_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_step_start_time);
+  Atomic::store(&_step_start_time, now);
 }
 
 jlong VMError::get_step_start_time() {
@@ -416,7 +416,7 @@
 }
 
 void VMError::clear_step_start_time() {
-  return Atomic::store((jlong)0, &_step_start_time);
+  return Atomic::store(&_step_start_time, (jlong)0);
 }
 
 void VMError::report(outputStream* st, bool _verbose) {