8234737: Harmonize parameter order in Atomic - add
author stefank
Mon, 25 Nov 2019 12:31:39 +0100
changeset 59249 29b0d0b61615
parent 59248 e92153ed8bdc
child 59250 a6deb69743d4
8234737: Harmonize parameter order in Atomic - add Reviewed-by: rehn, dholmes
src/hotspot/cpu/arm/stubGenerator_arm.cpp
src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
src/hotspot/os/bsd/os_bsd.cpp
src/hotspot/os/linux/os_linux.cpp
src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp
src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp
src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp
src/hotspot/share/classfile/stringTable.cpp
src/hotspot/share/classfile/symbolTable.cpp
src/hotspot/share/compiler/compileBroker.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
src/hotspot/share/gc/g1/g1HotCardCache.cpp
src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp
src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp
src/hotspot/share/gc/g1/g1RemSet.cpp
src/hotspot/share/gc/parallel/parMarkBitMap.cpp
src/hotspot/share/gc/parallel/psParallelCompact.cpp
src/hotspot/share/gc/parallel/psParallelCompact.hpp
src/hotspot/share/gc/shared/oopStorage.cpp
src/hotspot/share/gc/shared/plab.inline.hpp
src/hotspot/share/gc/shared/preservedMarks.cpp
src/hotspot/share/gc/shared/ptrQueue.cpp
src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp
src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp
src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
src/hotspot/share/gc/shared/workgroup.cpp
src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
src/hotspot/share/gc/z/zArray.inline.hpp
src/hotspot/share/gc/z/zLiveMap.inline.hpp
src/hotspot/share/gc/z/zMarkStackAllocator.cpp
src/hotspot/share/gc/z/zMarkTerminate.inline.hpp
src/hotspot/share/gc/z/zNMethodTableIteration.cpp
src/hotspot/share/gc/z/zObjectAllocator.cpp
src/hotspot/share/gc/z/zRelocationSet.inline.hpp
src/hotspot/share/gc/z/zStat.cpp
src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp
src/hotspot/share/logging/logOutputList.cpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/oops/klass.cpp
src/hotspot/share/prims/resolvedMethodTable.cpp
src/hotspot/share/runtime/atomic.hpp
src/hotspot/share/runtime/os.cpp
src/hotspot/share/runtime/threadSMR.cpp
src/hotspot/share/runtime/threadSMR.inline.hpp
src/hotspot/share/services/mallocSiteTable.hpp
src/hotspot/share/services/mallocTracker.hpp
src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
src/hotspot/share/utilities/globalCounter.cpp
src/hotspot/share/utilities/singleWriterSynchronizer.cpp
src/hotspot/share/utilities/singleWriterSynchronizer.hpp
src/hotspot/share/utilities/waitBarrier_generic.cpp
test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp
test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp
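
The change itself is a pure argument-order swap at every call site, as the hunks below show (for example in os_bsd.cpp and os_linux.cpp). The following stand-alone sketch illustrates the old and new call shapes; the Atomic stand-in here is not HotSpot's class, just a minimal wrapper over the GCC/Clang __atomic_add_fetch builtin (similar to what the zero and aarch64 ports use) so the example compiles on its own.

#include <cstdio>

// Minimal stand-in for HotSpot's Atomic::add with the new parameter order:
// destination first, addend second, returning the updated value.
struct Atomic {
  template<typename D, typename I>
  static D add(D volatile* dest, I add_value) {
    return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
  }
};

static volatile int cnt = 0;

int main() {
  // Old call shape (removed by this change):    int num = Atomic::add(1, &cnt);
  // New call shape (introduced by this change):
  int num = Atomic::add(&cnt, 1);
  printf("num = %d\n", num);   // prints "num = 1"
  return 0;
}
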
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -437,7 +437,8 @@
   // for which we do not support MP and so membars are not necessary. This ARMv5 code will
   // be removed in the future.
 
-  // Support for jint Atomic::add(jint add_value, volatile jint *dest)
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments :
   //
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -679,7 +679,8 @@
   }
 
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments:
   //
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -668,7 +668,8 @@
     return start;
   }
 
-  // Support for jint atomic::add(jint add_value, volatile jint* dest)
+  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
   //
   // Arguments :
   //    c_rarg0: add_value
@@ -690,7 +691,8 @@
     return start;
   }
 
-  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
+  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
   //
   // Arguments :
   //    c_rarg0: add_value
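
As the comments above spell out, the generated stubs keep the legacy (add_value, dest) argument order while the public Atomic::add now takes (dest, add_value), so the shared layer swaps the arguments when forwarding to such a helper. Below is a simplified, self-contained sketch of that forwarding; the names StubAddFunc, fake_atomic_add_stub and add_via_stub are illustrative only and do not appear in HotSpot.

#include <cstdint>
#include <cstdio>

// Old-style helper signature: addend first, destination second (as the stub
// comments above describe), returning the updated value.
typedef int32_t (*StubAddFunc)(int32_t add_value, int32_t volatile* dest);

// Stand-in for the generated stub; the real one is hand-written assembly.
static int32_t fake_atomic_add_stub(int32_t add_value, int32_t volatile* dest) {
  return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
}

// New-style front end: destination first, addend second. It simply swaps the
// arguments back into the order the stub expects.
static int32_t add_via_stub(StubAddFunc fn, int32_t volatile* dest, int32_t add_value) {
  return fn(add_value, dest);
}

int main() {
  volatile int32_t counter = 41;
  printf("%d\n", (int)add_via_stub(fake_atomic_add_stub, &counter, 1));  // prints 42
  return 0;
}
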
--- a/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -1894,7 +1894,7 @@
   }
 
   char buf[PATH_MAX + 1];
-  int num = Atomic::add(1, &cnt);
+  int num = Atomic::add(&cnt, 1);
 
   snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
            os::get_temp_directory(), os::current_process_id(), num);
@@ -3264,7 +3264,7 @@
 
   while (processor_id < 0) {
     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
-      Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1);
+      Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
     }
     processor_id = Atomic::load(&mapping[apic_id]);
   }
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -2813,7 +2813,7 @@
   }
 
   char buf[PATH_MAX+1];
-  int num = Atomic::add(1, &cnt);
+  int num = Atomic::add(&cnt, 1);
 
   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
            os::get_temp_directory(), os::current_process_id(), num);
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -96,13 +96,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -127,8 +127,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -31,13 +31,13 @@
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -92,8 +92,8 @@
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
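
Two flavors of platform hook appear in these headers: the x86 ports supply fetch_and_add (returning the old value), while most other ports supply add_and_fetch (returning the new value); the shared Atomic::FetchAndAdd / Atomic::AddAndFetch scaffolding in atomic.hpp reconciles them so that Atomic::add always returns the updated value. The sketch below illustrates that relationship with stand-in functions built on the __atomic builtins; it is not the HotSpot scaffolding itself.

#include <cstdio>

// x86 style hook: returns the value the location held *before* the addition.
template<typename D, typename I>
D fetch_and_add(D volatile* dest, I add_value) {
  return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
}

// ppc/arm/s390 style hook: returns the value *after* the addition.
template<typename D, typename I>
D add_and_fetch(D volatile* dest, I add_value) {
  return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
}

// What the shared FetchAndAdd wrapper conceptually does so that Atomic::add
// can always return the updated value: old value plus the addend.
template<typename D, typename I>
D add_returning_new(D volatile* dest, I add_value) {
  return fetch_and_add(dest, add_value) + add_value;
}

int main() {
  volatile long v = 10;
  printf("%ld\n", add_returning_new(&v, 5L));   // 15
  printf("%ld\n", add_and_fetch(&v, 5L));       // 20, same return convention
  return 0;
}
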
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -163,22 +163,22 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
 #ifdef ARM
-  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
 #else
 #ifdef M68K
-  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
@@ -186,8 +186,8 @@
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -36,8 +36,8 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -70,17 +70,17 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -96,13 +96,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -127,8 +127,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -78,13 +78,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -137,8 +137,8 @@
 
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -31,13 +31,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -59,8 +59,8 @@
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -31,13 +31,13 @@
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -93,8 +93,8 @@
 #ifdef AMD64
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -34,13 +34,13 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -49,8 +49,8 @@
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -30,8 +30,8 @@
 // Implement ADD using a CAS loop.
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
-  template<typename I, typename D>
-  inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+  template<typename D, typename I>
+  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
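
The hunk above only shows the head of the CAS loop. A self-contained sketch of the same pattern with the new (dest, add_value) order follows; it substitutes the GCC/Clang compare-exchange builtin for HotSpot's cmpxchg, so it is an approximation rather than the Solaris/SPARC code.

#include <cstdio>

// CAS-loop add with the new (dest, add_value) order; add_and_fetch semantics.
template<typename D, typename I>
D add_with_cas_loop(D volatile* dest, I add_value) {
  D old_value = *dest;
  while (true) {
    D new_value = old_value + add_value;
    // Try to install new_value. On failure the builtin refreshes old_value
    // with the current contents of *dest and the loop retries.
    if (__atomic_compare_exchange_n(const_cast<D*>(dest), &old_value, new_value,
                                    /*weak=*/false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
      return new_value;
    }
  }
}

int main() {
  volatile int v = 7;
  printf("%d\n", add_with_cas_loop(&v, 3));  // prints 10
  return 0;
}
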
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -44,14 +44,14 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -62,8 +62,8 @@
 
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
--- a/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il	Mon Nov 25 12:31:39 2019 +0100
@@ -49,7 +49,8 @@
       orq      %rdx, %rax
       .end
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest)
+  // Implementation of jint _Atomic_add(jint add_value, volatile jint* dest)
+  // used by Atomic::add(volatile jint* dest, jint add_value)
       .inline _Atomic_add,2
       movl     %edi, %eax      // save add_value for return
       lock
@@ -57,7 +58,8 @@
       addl     %edi, %eax
       .end
 
-  // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
+  // Implementation of jlong _Atomic_add_long(jlong add_value, volatile jlong* dest)
+  // used by Atomic::add(volatile jlong* dest, jlong add_value)
       .inline _Atomic_add_long,2
       movq     %rdi, %rax      // save add_value for return
       lock
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -57,23 +57,23 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
@@ -111,8 +111,8 @@
 #else // !AMD64
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
--- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -50,7 +50,7 @@
 }
 
 void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
-  Atomic::add(count, &_num_instance_classes);
+  Atomic::add(&_num_instance_classes, count);
 }
 
 void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
@@ -59,7 +59,7 @@
 }
 
 void ClassLoaderDataGraph::inc_array_classes(size_t count) {
-  Atomic::add(count, &_num_array_classes);
+  Atomic::add(&_num_array_classes, count);
 }
 
 void ClassLoaderDataGraph::dec_array_classes(size_t count) {
--- a/src/hotspot/share/classfile/stringTable.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/classfile/stringTable.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -214,11 +214,11 @@
 }
 
 size_t StringTable::item_added() {
-  return Atomic::add((size_t)1, &_items_count);
+  return Atomic::add(&_items_count, (size_t)1);
 }
 
 size_t StringTable::add_items_to_clean(size_t ndead) {
-  size_t total = Atomic::add((size_t)ndead, &_uncleaned_items_count);
+  size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
   log_trace(stringtable)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
      _uncleaned_items_count, ndead, total);
@@ -226,7 +226,7 @@
 }
 
 void StringTable::item_removed() {
-  Atomic::add((size_t)-1, &_items_count);
+  Atomic::add(&_items_count, (size_t)-1);
 }
 
 double StringTable::get_load_factor() {
--- a/src/hotspot/share/classfile/symbolTable.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -724,7 +724,7 @@
     bdt.done(jt);
   }
 
-  Atomic::add(stdc._processed, &_symbols_counted);
+  Atomic::add(&_symbols_counted, stdc._processed);
 
   log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
                          stdd._deleted, stdc._processed);
--- a/src/hotspot/share/compiler/compileBroker.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -1479,14 +1479,14 @@
     assert(!is_osr, "can't be osr");
     // Adapters, native wrappers and method handle intrinsics
     // should be generated always.
-    return Atomic::add(1, &_compilation_id);
+    return Atomic::add(&_compilation_id, 1);
   } else if (CICountOSR && is_osr) {
-    id = Atomic::add(1, &_osr_compilation_id);
+    id = Atomic::add(&_osr_compilation_id, 1);
     if (CIStartOSR <= id && id < CIStopOSR) {
       return id;
     }
   } else {
-    id = Atomic::add(1, &_compilation_id);
+    id = Atomic::add(&_compilation_id, 1);
     if (CIStart <= id && id < CIStop) {
       return id;
     }
@@ -1498,7 +1498,7 @@
 #else
   // CICountOSR is a develop flag and set to 'false' by default. In a product built,
   // only _compilation_id is incremented.
-  return Atomic::add(1, &_compilation_id);
+  return Atomic::add(&_compilation_id, 1);
 #endif
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -4226,7 +4226,7 @@
     HeapRegion* r = g1h->region_at(region_idx);
     assert(!g1h->is_on_master_free_list(r), "sanity");
 
-    Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
+    Atomic::add(&_rs_length, r->rem_set()->occupied_locked());
 
     if (!is_young) {
       g1h->hot_card_cache()->reset_card_counts(r);
@@ -4290,7 +4290,7 @@
 
     // Claim serial work.
     if (_serial_work_claim == 0) {
-      jint value = Atomic::add(1, &_serial_work_claim) - 1;
+      jint value = Atomic::add(&_serial_work_claim, 1) - 1;
       if (value == 0) {
         double serial_time = os::elapsedTime();
         do_serial_work();
@@ -4305,7 +4305,7 @@
     bool has_non_young_time = false;
 
     while (true) {
-      size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
+      size_t end = Atomic::add(&_parallel_work_claim, chunk_size());
       size_t cur = end - chunk_size();
 
       if (cur >= _num_work_items) {
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -112,7 +112,7 @@
 
     // Claim a new chunk, returning its bounds [from, to[.
     void claim_chunk(uint& from, uint& to) {
-      uint result = Atomic::add(_chunk_size, &_cur_claim_idx);
+      uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
       assert(_max_size > result - 1,
              "Array too small, is %u should be %u with chunk size %u.",
              _max_size, result, _chunk_size);
@@ -214,8 +214,8 @@
   void update_totals(uint num_regions, size_t reclaimable_bytes) {
     if (num_regions > 0) {
       assert(reclaimable_bytes > 0, "invariant");
-      Atomic::add(num_regions, &_num_regions_added);
-      Atomic::add(reclaimable_bytes, &_reclaimable_bytes_added);
+      Atomic::add(&_num_regions_added, num_regions);
+      Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
     } else {
       assert(reclaimable_bytes == 0, "invariant");
     }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -207,7 +207,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
+  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -280,7 +280,7 @@
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
+  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -308,7 +308,7 @@
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
+  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }
@@ -1121,7 +1121,7 @@
   virtual void work(uint worker_id) {
     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
-    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
+    Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
   }
 
   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-  Atomic::add(value, &_direct_allocated);
+  Atomic::add(&_direct_allocated, value);
 }
 
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-  Atomic::add(value, &_region_end_waste);
+  Atomic::add(&_region_end_waste, value);
   Atomic::inc(&_regions_filled);
 }
 
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-  Atomic::add(used, &_failure_used);
-  Atomic::add(waste, &_failure_waste);
+  Atomic::add(&_failure_used, used);
+  Atomic::add(&_failure_waste, waste);
 }
 
 #endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -101,7 +101,7 @@
 
   // Adjust the weak roots.
 
-  if (Atomic::add(1u, &_references_done) == 1u) { // First incr claims task.
+  if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task.
     G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust);
   }
 
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -68,7 +68,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
+  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
   CardValue* current_ptr = _hot_cache[masked_index];
 
@@ -91,8 +91,8 @@
   assert(!use_cache(), "cache should be disabled");
 
   while (_hot_cache_par_claimed_idx < _hot_cache_size) {
-    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
-                                 &_hot_cache_par_claimed_idx);
+    size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
+                                 _hot_cache_par_chunk_size);
     size_t start_idx = end_idx - _hot_cache_par_chunk_size;
     // The current worker has successfully claimed the chunk [start_idx..end_idx)
     end_idx = MIN2(end_idx, _hot_cache_size);
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -261,7 +261,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
+      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
--- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -129,7 +129,7 @@
 
 void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
   assert(_collecting, "precondition");
-  Atomic::add(buffer_size() - node->index(), &_entry_count);
+  Atomic::add(&_entry_count, buffer_size() - node->index());
   _list.push(*node);
   update_tail(node);
 }
@@ -139,7 +139,7 @@
   const G1BufferNodeList from = src->take_all_completed_buffers();
   if (from._head != NULL) {
     assert(from._tail != NULL, "invariant");
-    Atomic::add(from._entry_count, &_entry_count);
+    Atomic::add(&_entry_count, from._entry_count);
     _list.prepend(*from._head, *from._tail);
     update_tail(from._tail);
   }
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -46,7 +46,7 @@
 inline void G1RegionMarkStatsCache::evict(uint idx) {
   G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
   if (cur->_stats._live_words != 0) {
-    Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words);
+    Atomic::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
   }
   cur->clear();
 }
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -179,7 +179,7 @@
 
       bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
       if (marked_as_dirty) {
-        uint allocated = Atomic::add(1u, &_cur_idx) - 1;
+        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
         _buffer[allocated] = region;
       }
     }
@@ -255,7 +255,7 @@
 
     void work(uint worker_id) {
       while (_cur_dirty_regions < _regions->size()) {
-        uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
+        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
         uint max = MIN2(next + _chunk_length, _regions->size());
 
         for (uint i = next; i < max; i++) {
@@ -447,7 +447,7 @@
 
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(increment, &_card_table_scan_state[region]) - increment;
+    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
   }
 
   void add_dirty_region(uint const region) {
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -90,7 +90,7 @@
     bool end_bit_ok = _end_bits.par_set_bit(end_bit);
     assert(end_bit_ok, "concurrency problem");
     DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
-    DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
+    DEBUG_ONLY(Atomic::add(&mark_bitmap_size, size));
     return true;
   }
   return false;
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -532,7 +532,7 @@
   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 
   DEBUG_ONLY(Atomic::inc(&add_obj_count);)
-  DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
+  DEBUG_ONLY(Atomic::add(&add_obj_size, len);)
 
   if (beg_region == end_region) {
     // All in one region.
@@ -2449,7 +2449,7 @@
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(1u, &_counter) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -536,7 +536,7 @@
 {
   assert(_dc_and_los < dc_claimed, "already claimed");
   assert(_dc_and_los >= dc_one, "count would go negative");
-  Atomic::add(dc_mask, &_dc_and_los);
+  Atomic::add(&_dc_and_los, dc_mask);
 }
 
 inline HeapWord* ParallelCompactData::RegionData::data_location() const
@@ -576,7 +576,7 @@
 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
 {
   assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
-  Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
+  Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
 }
 
 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -144,7 +144,7 @@
 }
 
 void OopStorage::ActiveArray::increment_refcount() const {
-  int new_value = Atomic::add(1, &_refcount);
+  int new_value = Atomic::add(&_refcount, 1);
   assert(new_value >= 1, "negative refcount %d", new_value - 1);
 }
 
@@ -1010,7 +1010,7 @@
   // than a CAS loop on some platforms when there is contention.
   // We can cope with the uncertainty by recomputing start/end from
   // the result of the add, and dealing with potential overshoot.
-  size_t end = Atomic::add(step, &_next_block);
+  size_t end = Atomic::add(&_next_block, step);
   // _next_block may have changed, so recompute start from result of add.
   start = end - step;
   // _next_block may have changed so much that end has overshot.
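
The comment in this hunk explains the claiming scheme: an unconditional atomic add is used instead of a CAS loop, and each claimer recomputes its [start, end) range from the add's return value, clamping when the shared index overshoots the limit. A small stand-alone sketch of that pattern follows; the names and the __atomic builtin are stand-ins, and only the claim-by-add logic mirrors the OopStorage code above.

#include <cstddef>
#include <cstdio>
#include <algorithm>

static volatile size_t next_block = 0;

// Claim the next [start, end) chunk of at most `step` items below `limit`.
// Returns false once the shared index has moved past the limit.
static bool claim_range(size_t step, size_t limit, size_t* start, size_t* end) {
  size_t e = __atomic_add_fetch(&next_block, step, __ATOMIC_SEQ_CST);
  size_t s = e - step;              // recompute start from the result of the add
  if (s >= limit) {
    return false;                   // overshot entirely: nothing left to claim
  }
  *start = s;
  *end = std::min(e, limit);        // partial overshoot: clamp the end
  return true;
}

int main() {
  size_t s, e;
  while (claim_range(4, 10, &s, &e)) {
    printf("claimed [%zu, %zu)\n", s, e);  // [0,4) [4,8) [8,10)
  }
  return 0;
}
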
--- a/src/hotspot/share/gc/shared/plab.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/plab.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -43,19 +43,19 @@
 }
 
 void PLABStats::add_allocated(size_t v) {
-  Atomic::add(v, &_allocated);
+  Atomic::add(&_allocated, v);
 }
 
 void PLABStats::add_unused(size_t v) {
-  Atomic::add(v, &_unused);
+  Atomic::add(&_unused, v);
 }
 
 void PLABStats::add_wasted(size_t v) {
-  Atomic::add(v, &_wasted);
+  Atomic::add(&_wasted, v);
 }
 
 void PLABStats::add_undo_wasted(size_t v) {
-  Atomic::add(v, &_undo_wasted);
+  Atomic::add(&_undo_wasted, v);
 }
 
 #endif // SHARE_GC_SHARED_PLAB_INLINE_HPP
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -55,7 +55,7 @@
   restore();
   // Only do the atomic add if the size is > 0.
   if (stack_size > 0) {
-    Atomic::add(stack_size, total_size_addr);
+    Atomic::add(total_size_addr, stack_size);
   }
 }
 
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -182,7 +182,7 @@
   const size_t trigger_transfer = 10;
 
   // Add to pending list. Update count first so no underflow in transfer.
-  size_t pending_count = Atomic::add(1u, &_pending_count);
+  size_t pending_count = Atomic::add(&_pending_count, 1u);
   _pending_list.push(*node);
   if (pending_count > trigger_transfer) {
     try_transfer_pending();
@@ -219,7 +219,7 @@
 
     // Add synchronized nodes to _free_list.
     // Update count first so no underflow in allocate().
-    Atomic::add(count, &_free_count);
+    Atomic::add(&_free_count, count);
     _free_list.prepend(*first, *last);
     log_trace(gc, ptrqueue, freelist)
              ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
@@ -258,4 +258,3 @@
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
   _allocator->release(node);
 }
-
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -246,7 +246,7 @@
 
 void ReferenceProcessorPhaseTimes::add_ref_cleared(ReferenceType ref_type, size_t count) {
   ASSERT_REF_TYPE(ref_type);
-  Atomic::add(count, &_ref_cleared[ref_type_2_index(ref_type)]);
+  Atomic::add(&_ref_cleared[ref_type_2_index(ref_type)], count);
 }
 
 void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) {
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -32,7 +32,7 @@
 volatile size_t   StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(size_t(1), &_claimed_index) - 1;
+  return Atomic::add(&_claimed_index, size_t(1)) - 1;
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -589,7 +589,7 @@
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(partition_size, &_claimed_index) - partition_size;
+  return Atomic::add(&_claimed_index, partition_size) - partition_size;
 }
 
 void StringDedupTable::verify() {
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -153,7 +153,7 @@
     // Wait for the coordinator to dispatch a task.
     _start_semaphore->wait();
 
-    uint num_started = Atomic::add(1u, &_started);
+    uint num_started = Atomic::add(&_started, 1u);
 
     // Subtract one to get a zero-indexed worker id.
     uint worker_id = num_started - 1;
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -264,7 +264,7 @@
 
   size_t max = (size_t)list->length();
   while (_claimed < max) {
-    size_t cur = Atomic::add(stride, &_claimed) - stride;
+    size_t cur = Atomic::add(&_claimed, stride) - stride;
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -593,7 +593,7 @@
 
 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
   assert(ShenandoahPacing, "should only call when pacing is enabled");
-  Atomic::add(words, &_allocs_seen);
+  Atomic::add(&_allocs_seen, words);
 }
 
 void ShenandoahControlThread::set_forced_counters_update(bool value) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -620,7 +620,7 @@
 }
 
 void ShenandoahHeap::increase_used(size_t bytes) {
-  Atomic::add(bytes, &_used);
+  Atomic::add(&_used, bytes);
 }
 
 void ShenandoahHeap::set_used(size_t bytes) {
@@ -633,7 +633,7 @@
 }
 
 void ShenandoahHeap::increase_allocated(size_t bytes) {
-  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
+  Atomic::add(&_bytes_allocated_since_gc_start, bytes);
 }
 
 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
@@ -1350,7 +1350,7 @@
 
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(stride, &_index) - stride;
+      size_t cur = Atomic::add(&_index, stride) - stride;
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -49,7 +49,7 @@
 
 
 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
-  size_t new_index = Atomic::add((size_t) 1, &_index);
+  size_t new_index = Atomic::add(&_index, (size_t) 1);
   // get_region() provides the bounds-check and returns NULL on OOB.
   return _heap->get_region(new_index - 1);
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -687,7 +687,7 @@
 }
 
 void ShenandoahHeapRegion::record_pin() {
-  Atomic::add((size_t)1, &_critical_pins);
+  Atomic::add(&_critical_pins, (size_t)1);
 }
 
 void ShenandoahHeapRegion::record_unpin() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -103,7 +103,7 @@
 }
 
 inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
-  size_t new_live_data = Atomic::add(s, &_live_data);
+  size_t new_live_data = Atomic::add(&_live_data, s);
 #ifdef ASSERT
   size_t live_bytes = new_live_data * HeapWordSize;
   size_t used_bytes = used();
--- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -132,7 +132,7 @@
 }
 
 void BinaryMagnitudeSeq::add(size_t val) {
-  Atomic::add(val, &_sum);
+  Atomic::add(&_sum, val);
 
   int mag = log2_intptr(val) + 1;
 
@@ -147,7 +147,7 @@
     mag = BitsPerSize_t - 1;
   }
 
-  Atomic::add((size_t)1, &_mags[mag]);
+  Atomic::add(&_mags[mag], (size_t)1);
 }
 
 size_t BinaryMagnitudeSeq::level(int level) const {
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -223,7 +223,7 @@
   }
 
   intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
-  Atomic::add(tax, &_budget);
+  Atomic::add(&_budget, tax);
 }
 
 intptr_t ShenandoahPacer::epoch() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -47,13 +47,13 @@
 inline void ShenandoahPacer::report_internal(size_t words) {
   assert(ShenandoahPacing, "Only be here when pacing is enabled");
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-  Atomic::add((intptr_t)words, &_budget);
+  Atomic::add(&_budget, (intptr_t)words);
 }
 
 inline void ShenandoahPacer::report_progress_internal(size_t words) {
   assert(ShenandoahPacing, "Only be here when pacing is enabled");
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-  Atomic::add((intptr_t)words, &_progress);
+  Atomic::add(&_progress, (intptr_t)words);
 }
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -304,7 +304,7 @@
     return NULL;
   }
 
-  jint index = Atomic::add(1, &_claimed_index);
+  jint index = Atomic::add(&_claimed_index, 1);
 
   if (index <= size) {
     return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -139,7 +139,7 @@
           // skip
           break;
         case ShenandoahVerifier::_verify_liveness_complete:
-          Atomic::add((uint) obj->size(), &_ld[obj_reg->region_number()]);
+          Atomic::add(&_ld[obj_reg->region_number()], (uint) obj->size());
           // fallthrough for fast failure for un-live regions:
         case ShenandoahVerifier::_verify_liveness_conservative:
           check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
@@ -479,7 +479,7 @@
       }
     }
 
-    Atomic::add(processed, &_processed);
+    Atomic::add(&_processed, processed);
   }
 };
 
@@ -518,7 +518,7 @@
                                   _options);
 
     while (true) {
-      size_t v = Atomic::add(1u, &_claimed) - 1;
+      size_t v = Atomic::add(&_claimed, 1u) - 1;
       if (v < _heap->num_regions()) {
         ShenandoahHeapRegion* r = _heap->get_region(v);
         if (!r->is_humongous() && !r->is_trash()) {
@@ -538,7 +538,7 @@
     if (_heap->complete_marking_context()->is_marked((oop)obj)) {
       verify_and_follow(obj, stack, cl, &processed);
     }
-    Atomic::add(processed, &_processed);
+    Atomic::add(&_processed, processed);
   }
 
   virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
@@ -571,7 +571,7 @@
       }
     }
 
-    Atomic::add(processed, &_processed);
+    Atomic::add(&_processed, processed);
   }
 
   void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) {
--- a/src/hotspot/share/gc/z/zArray.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -101,7 +101,7 @@
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(1u, &_next) - 1u;
+    const size_t next = Atomic::add(&_next, 1u) - 1u;
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -121,8 +121,8 @@
 }
 
 inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
-  Atomic::add(objects, &_live_objects);
-  Atomic::add(bytes, &_live_bytes);
+  Atomic::add(&_live_objects, objects);
+  Atomic::add(&_live_bytes, bytes);
 }
 
 inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -110,8 +110,8 @@
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(size, &_top) - size;
-  Atomic::add(expand_size, &_end);
+  addr = Atomic::add(&_top, size) - size;
+  Atomic::add(&_end, expand_size);
 
   return addr;
 }
--- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -37,7 +37,7 @@
 }
 
 inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) {
-  Atomic::add(1u, nworking_stage);
+  Atomic::add(nworking_stage, 1u);
 }
 
 inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) {
--- a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -58,7 +58,7 @@
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
     const size_t partition_end = MIN2(partition_start + partition_size, _size);
     if (partition_start == partition_end) {
       // End of table
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -63,7 +63,7 @@
   ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
   if (page != NULL) {
     // Increment used bytes
-    Atomic::add(size, _used.addr());
+    Atomic::add(_used.addr(), size);
   }
 
   return page;
@@ -71,7 +71,7 @@
 
 void ZObjectAllocator::undo_alloc_page(ZPage* page) {
   // Increment undone bytes
-  Atomic::add(page->size(), _undone.addr());
+  Atomic::add(_undone.addr(), page->size());
 
   ZHeap::heap()->undo_alloc_page(page);
 }
--- a/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -38,7 +38,7 @@
 
   if (parallel) {
     if (_next < nforwardings) {
-      const size_t next = Atomic::add(1u, &_next) - 1u;
+      const size_t next = Atomic::add(&_next, 1u) - 1u;
       if (next < nforwardings) {
         *forwarding = _relocation_set->_forwardings[next];
         return true;
--- a/src/hotspot/share/gc/z/zStat.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/gc/z/zStat.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -761,8 +761,8 @@
 //
 void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
   ZStatSamplerData* const cpu_data = sampler.get();
-  Atomic::add(1u, &cpu_data->_nsamples);
-  Atomic::add(value, &cpu_data->_sum);
+  Atomic::add(&cpu_data->_nsamples, 1u);
+  Atomic::add(&cpu_data->_sum, value);
 
   uint64_t max = cpu_data->_max;
   for (;;) {
@@ -787,14 +787,14 @@
 
 void ZStatInc(const ZStatCounter& counter, uint64_t increment) {
   ZStatCounterData* const cpu_data = counter.get();
-  const uint64_t value = Atomic::add(increment, &cpu_data->_counter);
+  const uint64_t value = Atomic::add(&cpu_data->_counter, increment);
 
   ZTracer::tracer()->report_stat_counter(counter, increment, value);
 }
 
 void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
   ZStatCounterData* const cpu_data = counter.get();
-  Atomic::add(increment, &cpu_data->_counter);
+  Atomic::add(&cpu_data->_counter, increment);
 }
 
 //
--- a/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -92,11 +92,11 @@
   MultiThreadedRefCounter() : _refs(0) {}
 
   void inc() const {
-    Atomic::add(1, &_refs);
+    Atomic::add(&_refs, 1);
   }
 
   bool dec() const {
-    return 0 == Atomic::add((-1), &_refs);
+    return 0 == Atomic::add(&_refs, (-1));
   }
 
   int current() const {
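
The reference-counting idiom above relies on Atomic::add returning the updated value: dec() reports whether this call dropped the count to zero. A standalone analogy using std::atomic (the class below is illustrative only, not JFR's MultiThreadedRefCounter):

#include <atomic>

class RefCounter {
  mutable std::atomic<int> _refs{0};
 public:
  void inc() const { _refs.fetch_add(1, std::memory_order_relaxed); }
  // fetch_sub returns the OLD value; "old - 1 == 0" mirrors
  // "0 == Atomic::add(&_refs, -1)", which returns the NEW value.
  bool dec() const { return _refs.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0; }
  int current() const { return _refs.load(std::memory_order_relaxed); }
};
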
--- a/src/hotspot/share/logging/logOutputList.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/logging/logOutputList.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -30,13 +30,13 @@
 #include "utilities/globalDefinitions.hpp"
 
 jint LogOutputList::increase_readers() {
-  jint result = Atomic::add(1, &_active_readers);
+  jint result = Atomic::add(&_active_readers, 1);
   assert(_active_readers > 0, "Ensure we have consistent state");
   return result;
 }
 
 jint LogOutputList::decrease_readers() {
-  jint result = Atomic::add(-1, &_active_readers);
+  jint result = Atomic::add(&_active_readers, -1);
   assert(result >= 0, "Ensure we have consistent state");
   return result;
 }
--- a/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -394,7 +394,7 @@
 }
 
 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
-  Atomic::add(words, pstat);
+  Atomic::add(pstat, words);
 }
 
 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
--- a/src/hotspot/share/memory/universe.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/memory/universe.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -580,7 +580,7 @@
   int next;
   if ((_preallocated_out_of_memory_error_avail_count > 0) &&
       SystemDictionary::Throwable_klass()->is_initialized()) {
-    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
+    next = (int)Atomic::add(&_preallocated_out_of_memory_error_avail_count, -1);
     assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
   } else {
     next = -1;
--- a/src/hotspot/share/oops/klass.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/oops/klass.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -710,7 +710,7 @@
 }
 
 int Klass::atomic_incr_biased_lock_revocation_count() {
-  return (int) Atomic::add(1, &_biased_lock_revocation_count);
+  return (int) Atomic::add(&_biased_lock_revocation_count, 1);
 }
 
 // Unless overridden, jvmti_class_status has no flags set.
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -327,7 +327,7 @@
 }
 
 void ResolvedMethodTable::inc_dead_counter(size_t ndead) {
-  size_t total = Atomic::add(ndead, &_uncleaned_items_count);
+  size_t total = Atomic::add(&_uncleaned_items_count, ndead);
   log_trace(membername, table)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
      _uncleaned_items_count, ndead, total);
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -100,8 +100,8 @@
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
-  template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest,
+  template<typename D, typename I>
+  inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
   template<typename I, typename D>
@@ -224,7 +224,7 @@
   // Dispatch handler for add.  Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
   // implementation layer provided by PlatformAdd.
-  template<typename I, typename D, typename Enable = void>
+  template<typename D, typename I, typename Enable = void>
   struct AddImpl;
 
   // Platform-specific implementation of add.  Support for sizes of 4
@@ -239,7 +239,7 @@
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
   // Then
-  //   platform_add(add_value, dest)
+  //   platform_add(dest, add_value)
   // must be a valid expression, returning a result convertible to D.
   //
   // No definition is provided; all platforms must explicitly define
@@ -259,12 +259,12 @@
   // otherwise, addend is add_value.
   //
   // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(addend, dest)
+  //   fetch_and_add(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // old value.
   //
   // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(addend, dest)
+  //   add_and_fetch(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // new value.
   //
@@ -286,8 +286,8 @@
   // function.  No scaling of add_value is performed when D is a pointer
   // type, so this function can be used to implement the support function
   // required by AddAndFetch.
-  template<typename Type, typename Fn, typename I, typename D>
-  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+  template<typename Type, typename Fn, typename D, typename I>
+  static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
@@ -517,21 +517,21 @@
 
 template<typename Derived>
 struct Atomic::FetchAndAdd {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
-  Atomic::add(I(1), dest, order);
+  Atomic::add(dest, I(1), order);
 }
 
 template<typename D>
@@ -540,7 +540,7 @@
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
   // Assumes two's complement integer representation.
   #pragma warning(suppress: 4146)
-  Atomic::add(I(-1), dest, order);
+  Atomic::add(dest, I(-1), order);
 }
 
 template<typename I, typename D>
@@ -557,7 +557,7 @@
   AddendType addend = sub_value;
   // Assumes two's complement integer representation.
   #pragma warning(suppress: 4146) // In case AddendType is not signed.
-  return Atomic::add(-addend, dest, order);
+  return Atomic::add(dest, -addend, order);
 }
 
 // Define the class before including platform file, which may specialize
@@ -678,68 +678,68 @@
   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
-template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<I, D>()(add_value, dest, order);
+  return AddImpl<D, I>()(dest, add_value, order);
 }
 
-template<typename I, typename D>
+template<typename D, typename I>
 struct Atomic::AddImpl<
-  I, D,
+  D, I,
   typename EnableIf<IsIntegral<I>::value &&
                     IsIntegral<D>::value &&
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest, order);
+    return PlatformAdd<sizeof(D)>()(dest, addend, order);
   }
 };
 
-template<typename I, typename P>
+template<typename P, typename I>
 struct Atomic::AddImpl<
-  I, P*,
+  P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
+  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
+    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
   }
 };
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
   return old + add_value;
 }
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
+  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 }
 
-template<typename Type, typename Fn, typename I, typename D>
-inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+template<typename Type, typename Fn, typename D, typename I>
+inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
     fn(PrimitiveConversions::cast<Type>(add_value),
        reinterpret_cast<Type volatile*>(dest)));
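
To summarize the interface documented above: the destination now comes first and the addend second; Atomic::add still returns the updated value; FetchAndAdd-style platforms supply a primitive that returns the old value (the wrapper adds add_value back on); AddAndFetch-style platforms return the new value directly; and an integral addend applied to a P* destination is scaled by sizeof(P). A standalone sketch of those semantics using std::atomic (not HotSpot code; names are illustrative):

#include <atomic>
#include <cassert>

int main() {
  std::atomic<unsigned> counter{41};
  // fetch_add returns the old value; adding the addend back yields the
  // updated value that Atomic::add(&counter, 1u) reports.
  assert(counter.fetch_add(1u) + 1u == 42u);

  int buf[4] = {0, 1, 2, 3};
  std::atomic<int*> cursor{buf};
  // Adds to a pointer destination advance by whole elements, i.e. the
  // addend is scaled by sizeof(int), as FetchAndAdd/AddAndFetch do for P*.
  int* next = cursor.fetch_add(2) + 2;  // old value + 2 elements = new value
  assert(next == buf + 2);
  return 0;
}
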
--- a/src/hotspot/share/runtime/os.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/runtime/os.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -668,7 +668,7 @@
     if ((cur_malloc_words + words) > MallocMaxTestWords) {
       return true;
     }
-    Atomic::add(words, &cur_malloc_words);
+    Atomic::add(&cur_malloc_words, words);
   }
   return false;
 }
--- a/src/hotspot/share/runtime/threadSMR.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -134,7 +134,7 @@
 // 'inline' functions first so the definitions are before first use:
 
 inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) {
-  Atomic::add(add_value, &_deleted_thread_times);
+  Atomic::add(&_deleted_thread_times, add_value);
 }
 
 inline void ThreadsSMRSupport::inc_deleted_thread_cnt() {
--- a/src/hotspot/share/runtime/threadSMR.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -56,7 +56,7 @@
 // they are called by public inline update_tlh_stats() below:
 
 inline void ThreadsSMRSupport::add_tlh_times(uint add_value) {
-  Atomic::add(add_value, &_tlh_times);
+  Atomic::add(&_tlh_times, add_value);
 }
 
 inline void ThreadsSMRSupport::inc_tlh_cnt() {
--- a/src/hotspot/share/services/mallocSiteTable.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/services/mallocSiteTable.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -153,7 +153,7 @@
     // Acquire shared lock.
     // Return true if shared access is granted.
     inline bool sharedLock() {
-      jint res = Atomic::add(1, _lock);
+      jint res = Atomic::add(_lock, 1);
       if (res < 0) {
         Atomic::dec(_lock);
         return false;
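
The sharedLock() above is a readers-count idiom: a reader bumps the count and immediately backs out if the result is negative, a state the exclusive locker establishes by driving the count negative. A standalone sketch with std::atomic (hypothetical class, not MallocSiteTable's AccessLock):

#include <atomic>

class SharedAccess {
  std::atomic<int> _lock{0};
 public:
  bool try_shared_lock() {
    // +1, then inspect the updated value, as "Atomic::add(_lock, 1)" does.
    int res = _lock.fetch_add(1) + 1;
    if (res < 0) {
      _lock.fetch_sub(1);  // back out; an exclusive holder made it negative
      return false;
    }
    return true;
  }
  void shared_unlock() { _lock.fetch_sub(1); }
};
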
--- a/src/hotspot/share/services/mallocTracker.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/services/mallocTracker.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -55,7 +55,7 @@
   inline void allocate(size_t sz) {
     Atomic::inc(&_count);
     if (sz > 0) {
-      Atomic::add(sz, &_size);
+      Atomic::add(&_size, sz);
       DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
     }
     DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
@@ -72,7 +72,7 @@
 
   inline void resize(long sz) {
     if (sz != 0) {
-      Atomic::add(size_t(sz), &_size);
+      Atomic::add(&_size, size_t(sz));
       DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
     }
   }
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -53,7 +53,7 @@
 
   // Returns true if you succeeded to claim the range start -> (stop-1).
   bool claim(size_t* start, size_t* stop) {
-    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
+    size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
     if (claimed >= _stop_task) {
       return false;
     }
--- a/src/hotspot/share/utilities/globalCounter.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/utilities/globalCounter.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -59,7 +59,7 @@
 void GlobalCounter::write_synchronize() {
   assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
   // Atomic::add must provide fence since we have storeload dependency.
-  uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter);
+  uintx gbl_cnt = Atomic::add(&_global_counter._counter, COUNTER_INCREMENT);
 
   // Do all RCU threads.
   CounterThreadCheck ctc(gbl_cnt);
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -44,7 +44,7 @@
 // synchronization have exited that critical section.
 void SingleWriterSynchronizer::synchronize() {
   // Side-effect in assert balanced by debug-only dec at end.
-  assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
+  assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
   // We don't know anything about the muxing between this invocation
   // and invocations in other threads.  We must start with the latest
   // _enter polarity, else we could clobber the wrong _exit value on
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp	Mon Nov 25 12:31:39 2019 +0100
@@ -89,11 +89,11 @@
 };
 
 inline uint SingleWriterSynchronizer::enter() {
-  return Atomic::add(2u, &_enter);
+  return Atomic::add(&_enter, 2u);
 }
 
 inline void SingleWriterSynchronizer::exit(uint enter_value) {
-  uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
+  uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
   // If this exit completes a synchronize request, wakeup possibly
   // waiting synchronizer.  Read of _waiting_for must follow the _exit
   // update.
--- a/src/hotspot/share/utilities/waitBarrier_generic.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/src/hotspot/share/utilities/waitBarrier_generic.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -82,13 +82,13 @@
     OrderAccess::fence();
     return;
   }
-  Atomic::add(1, &_barrier_threads);
+  Atomic::add(&_barrier_threads, 1);
   if (barrier_tag != 0 && barrier_tag == _barrier_tag) {
-    Atomic::add(1, &_waiters);
+    Atomic::add(&_waiters, 1);
     _sem_barrier.wait();
     // We help out with posting, but we need to do so before we decrement the
     // _barrier_threads otherwise we might wake threads up in next wait.
     GenericWaitBarrier::wake_if_needed();
   }
-  Atomic::add(-1, &_barrier_threads);
+  Atomic::add(&_barrier_threads, -1);
 }
--- a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -114,7 +114,7 @@
       ThreadBlockInVM tbiv(this); // Safepoint check.
     }
     tty->print_cr("%u allocations: " SIZE_FORMAT, _thread_number, _allocations);
-    Atomic::add(_allocations, _total_allocations);
+    Atomic::add(_total_allocations, _allocations);
   }
 };
 
--- a/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp	Mon Nov 25 12:30:24 2019 +0100
+++ b/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp	Mon Nov 25 12:31:39 2019 +0100
@@ -157,7 +157,7 @@
       ThreadBlockInVM tbiv(this); // Safepoint check.
     }
     tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
-    Atomic::add(_allocations, _total_allocations);
+    Atomic::add(_total_allocations, _allocations);
   }
 };