8234738: Harmonize parameter order in Atomic - sub
author:    stefank
date:      Mon, 25 Nov 2019 12:32:07 +0100
changeset: 59250:a6deb69743d4
parent:    59249:29b0d0b61615
child:     59251:4cbfa5077d68
8234738: Harmonize parameter order in Atomic - sub
Reviewed-by: rehn, dholmes
src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp
src/hotspot/share/code/dependencyContext.cpp
src/hotspot/share/gc/shared/oopStorage.cpp
src/hotspot/share/gc/shared/ptrQueue.cpp
src/hotspot/share/gc/shared/workgroup.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
src/hotspot/share/gc/z/zForwarding.inline.hpp
src/hotspot/share/gc/z/zMarkTerminate.inline.hpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/runtime/atomic.hpp
src/hotspot/share/services/mallocTracker.hpp
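
The change itself is mechanical: at every call site below, the two arguments swap so that the destination pointer comes first and the operand second, matching the order Atomic::add already uses. A minimal before/after sketch (the counter name here is illustrative, not from this changeset):

    // Old parameter order: operand first, destination second.
    Atomic::sub(1u, &_example_counter);

    // New parameter order: destination first, operand second,
    // mirroring Atomic::add(&_example_counter, 1u).
    Atomic::sub(&_example_counter, 1u);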
--- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp	Mon Nov 25 12:32:07 2019 +0100
@@ -55,7 +55,7 @@
 
 void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
   assert(count <= _num_instance_classes, "Sanity");
-  Atomic::sub(count, &_num_instance_classes);
+  Atomic::sub(&_num_instance_classes, count);
 }
 
 void ClassLoaderDataGraph::inc_array_classes(size_t count) {
@@ -64,7 +64,7 @@
 
 void ClassLoaderDataGraph::dec_array_classes(size_t count) {
   assert(count <= _num_array_classes, "Sanity");
-  Atomic::sub(count, &_num_array_classes);
+  Atomic::sub(&_num_array_classes, count);
 }
 
 bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
--- a/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/code/dependencyContext.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -260,7 +260,7 @@
 #endif //PRODUCT
 
 int nmethodBucket::decrement() {
-  return Atomic::sub(1, &_count);
+  return Atomic::sub(&_count, 1);
 }
 
 // We use a monotonically increasing epoch counter to track the last epoch a given
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -149,7 +149,7 @@
 }
 
 bool OopStorage::ActiveArray::decrement_refcount() const {
-  int new_value = Atomic::sub(1, &_refcount);
+  int new_value = Atomic::sub(&_refcount, 1);
   assert(new_value >= 0, "negative refcount %d", new_value);
   return new_value == 0;
 }
@@ -724,7 +724,7 @@
     }
     // Release the contiguous entries that are in block.
     block->release_entries(releasing, this);
-    Atomic::sub(count, &_allocation_count);
+    Atomic::sub(&_allocation_count, count);
   }
 }
 
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -150,7 +150,7 @@
     // Decrement count after getting buffer from free list.  This, along
     // with incrementing count before adding to free list, ensures count
     // never underflows.
-    size_t count = Atomic::sub(1u, &_free_count);
+    size_t count = Atomic::sub(&_free_count, 1u);
     assert((count + 1) != 0, "_free_count underflow");
   }
   return node;
@@ -212,7 +212,7 @@
       last = next;
       ++count;
     }
-    Atomic::sub(count, &_pending_count);
+    Atomic::sub(&_pending_count, count);
 
     // Wait for any in-progress pops, to avoid ABA for them.
     GlobalCounter::write_synchronize();
@@ -236,7 +236,7 @@
     if (node == NULL) break;
     BufferNode::deallocate(node);
   }
-  size_t new_count = Atomic::sub(removed, &_free_count);
+  size_t new_count = Atomic::sub(&_free_count, removed);
   log_debug(gc, ptrqueue, freelist)
            ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
             name(), removed, new_count);
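
The comment in the hunk above captures a subtle invariant: because _free_count is incremented before a node is pushed onto the free list and decremented only after a node has been popped, the counter always bounds the list length from above, and every successful pop has a matching earlier increment, so the counter cannot underflow. A standalone sketch of that ordering, using std::atomic in place of HotSpot's Atomic (node handling elided):

    #include <atomic>

    static std::atomic<size_t> free_count{0};

    void push_free_node(/* BufferNode* node */) {
      free_count.fetch_add(1);   // increment BEFORE adding to the list
      // ... push node onto the lock-free free list ...
    }

    size_t pop_free_node() {
      // ... pop a node from the lock-free free list first ...
      // Decrement AFTER a successful pop; each decrement is matched by
      // a prior increment, so the counter never wraps below zero.
      return free_count.fetch_sub(1) - 1;  // new value, like Atomic::sub
    }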
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -164,7 +164,7 @@
   void worker_done_with_task() {
     // Mark that the worker is done with the task.
     // The worker is not allowed to read the state variables after this line.
-    uint not_finished = Atomic::sub(1u, &_not_finished);
+    uint not_finished = Atomic::sub(&_not_finished, 1u);
 
     // The last worker signals to the coordinator that all work is completed.
     if (not_finished == 0) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -629,7 +629,7 @@
 
 void ShenandoahHeap::decrease_used(size_t bytes) {
   assert(used() >= bytes, "never decrease heap size by more than we've left");
-  Atomic::sub(bytes, &_used);
+  Atomic::sub(&_used, bytes);
 }
 
 void ShenandoahHeap::increase_allocated(size_t bytes) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -692,7 +692,7 @@
 
 void ShenandoahHeapRegion::record_unpin() {
   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", region_number());
-  Atomic::sub((size_t)1, &_critical_pins);
+  Atomic::sub(&_critical_pins, (size_t)1);
 }
 
 size_t ShenandoahHeapRegion::pin_count() const {
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp	Mon Nov 25 12:32:07 2019 +0100
@@ -76,7 +76,7 @@
 
 inline bool ZForwarding::dec_refcount() {
   assert(_refcount > 0, "Invalid state");
-  return Atomic::sub(1u, &_refcount) == 0u;
+  return Atomic::sub(&_refcount, 1u) == 0u;
 }
 
 inline bool ZForwarding::retain_page() {
--- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp	Mon Nov 25 12:32:07 2019 +0100
@@ -33,7 +33,7 @@
     _nworking_stage1(0) {}
 
 inline bool ZMarkTerminate::enter_stage(volatile uint* nworking_stage) {
-  return Atomic::sub(1u, nworking_stage) == 0;
+  return Atomic::sub(nworking_stage, 1u) == 0;
 }
 
 inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) {
--- a/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp	Mon Nov 25 12:32:07 2019 +0100
@@ -180,7 +180,7 @@
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
   assert_is_aligned(v, Metaspace::commit_alignment());
 
-  return Atomic::sub(v, &_capacity_until_GC);
+  return Atomic::sub(&_capacity_until_GC, v);
 }
 
 void MetaspaceGC::initialize() {
@@ -402,7 +402,7 @@
   assert(size_now >= words, "About to decrement counter below zero "
          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
          size_now, words);
-  Atomic::sub(words, pstat);
+  Atomic::sub(pstat, words);
 }
 
 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
--- a/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp	Mon Nov 25 12:32:07 2019 +0100
@@ -104,8 +104,8 @@
   inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
-  template<typename I, typename D>
-  inline static D sub(I sub_value, D volatile* dest,
+  template<typename D, typename I>
+  inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
 
   // Atomically increment location. inc() provide:
@@ -543,8 +543,8 @@
   Atomic::add(dest, I(-1), order);
 }
 
-template<typename I, typename D>
-inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
+template<typename D, typename I>
+inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   STATIC_ASSERT(IsIntegral<I>::value);
   // If D is a pointer type, use [u]intptr_t as the addend type,
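
The hunk above is truncated, but the declaration already shows the harmonized contract: destination first, operand second, and (as call sites such as decrement_refcount() rely on) the new value is returned. A self-contained sketch of that contract using std::atomic rather than HotSpot's internals (the function name is a made-up stand-in, not the real API):

    #include <atomic>
    #include <cassert>

    // Stand-in for the harmonized Atomic::sub signature. Note that
    // std::atomic::fetch_sub returns the OLD value, so we subtract once
    // more to return the NEW value, as HotSpot's Atomic::sub does.
    template<typename D, typename I>
    D atomic_sub_sketch(std::atomic<D>* dest, I sub_value) {
      return dest->fetch_sub(static_cast<D>(sub_value)) - static_cast<D>(sub_value);
    }

    int main() {
      std::atomic<unsigned> refcount{2};
      unsigned remaining = atomic_sub_sketch(&refcount, 1u);  // dest, operand
      assert(remaining == 1u);  // "new value == 0" is the last-reference test
      return 0;
    }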
--- a/src/hotspot/share/services/mallocTracker.hpp	Mon Nov 25 12:31:39 2019 +0100
+++ b/src/hotspot/share/services/mallocTracker.hpp	Mon Nov 25 12:32:07 2019 +0100
@@ -66,7 +66,7 @@
     assert(_size >= sz, "deallocation > allocated");
     Atomic::dec(&_count);
     if (sz > 0) {
-      Atomic::sub(sz, &_size);
+      Atomic::sub(&_size, sz);
     }
   }