# HG changeset patch
# User stefank
# Date 1574681595 -3600
# Node ID 623722a6aeb98e63ade808b1308d7973a57268ce
# Parent  4cbfa5077d68ad2e3e0800ce042a45cb3b95accb
8234740: Harmonize parameter order in Atomic - cmpxchg
Reviewed-by: rehn, dholmes

diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/cpu/arm/stubGenerator_arm.cpp
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -537,7 +537,8 @@
     return start;
   }

-  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+  // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+  // used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
   //
   // Arguments :
   //
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -883,7 +883,7 @@
   //
   // markWord displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -921,7 +921,7 @@
   std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
       BasicLock::displaced_header_offset_in_bytes(), monitor);

-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {

   // Store stack address of the BasicObjectLock (this is monitor) into object.
   addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -997,7 +997,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1030,7 +1030,7 @@
   cmpdi(CCR0, displaced_header, 0);
   beq(CCR0, free_slot); // recursive unlock

-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
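
[Review note, illustration only, not part of the changeset] Every call site moves from the old (exchange_value, dest, compare_value) order to (dest, compare_value, exchange_value), the order the locking pseudocode above already reads in and the one used by the C++11 compare_exchange family. A minimal before/after sketch with an invented flag variable:

    volatile int flag = 0;
    // old order: Atomic::cmpxchg(1, &flag, 0)
    int observed = Atomic::cmpxchg(&flag, 0, 1);  // new order: dest, compare, exchange
    bool won = (observed == 0);                   // cmpxchg returns the value it actually saw
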
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/cpu/s390/interp_masm_s390.cpp
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -976,7 +976,7 @@
   //
   // markWord displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -1011,7 +1011,7 @@
   z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
         BasicLock::displaced_header_offset_in_bytes(), monitor);

-  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {

   // Store stack address of the BasicObjectLock (this is monitor) into object.
   add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1082,7 +1082,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1123,7 +1123,7 @@
                                    BasicLock::displaced_header_offset_in_bytes()));
   z_bre(done); // displaced_header == 0 -> goto done

-  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -623,7 +623,8 @@
   }


-  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+  // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+  // used by Atomic::cmpxchg(volatile jint* dest, jint compare_value, jint exchange_value)
   //
   // Arguments:
   //
@@ -647,7 +648,8 @@
     return start;
   }

-  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+  // Implementation of jlong atomic_cmpxchg_long(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+  // used by Atomic::cmpxchg(volatile jlong *dest, jlong compare_value, jlong exchange_value)
   //
   // Arguments:
   //
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/aix/os_aix.cpp
--- a/src/hotspot/os/aix/os_aix.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/aix/os_aix.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -1084,7 +1084,7 @@
     if (now <= prev) {
       return prev;   // same or retrograde time;
     }
-    jlong obsv = Atomic::cmpxchg(now, &max_real_time, prev);
+    jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
     assert(obsv >= prev, "invariant");   // Monotonicity
     // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obsv" is >= now then
@@ -1794,7 +1794,7 @@
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
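
[Review note, illustration only] The max_real_time/_max_abstime/max_hrtime call sites in the os files are all the same monotonic-maximum CAS loop. A sketch of the shape, assuming only that Atomic::cmpxchg returns the value observed at dest (names invented):

    // Never let *max go backwards; return the value that ends up visible.
    static jlong advance_max(volatile jlong* max, jlong now) {
      jlong prev = Atomic::load(max);
      while (now > prev) {
        jlong obsv = Atomic::cmpxchg(max, prev, now);  // dest, compare, exchange
        if (obsv == prev) return now;  // we published the new maximum
        prev = obsv;                   // lost the race; retry against what we saw
      }
      return prev;  // same or retrograde time: keep the published maximum
    }
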
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/bsd/os_bsd.cpp
--- a/src/hotspot/os/bsd/os_bsd.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -930,7 +930,7 @@
     if (now <= prev) {
       return prev;   // same or retrograde time;
     }
-    const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
+    const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
     assert(obsv >= prev, "invariant");   // Monotonicity
     // If the CAS succeeded then we're done and return "now".
     // If the CAS failed and the observed value "obsv" is >= now then
@@ -1833,7 +1833,7 @@
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
@@ -3237,7 +3237,7 @@
      mapping[i] = -1;
    }

-    if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
+    if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
      FREE_C_HEAP_ARRAY(int, mapping);
      mapping = Atomic::load_acquire(&apic_to_processor_mapping);
    }
@@ -3263,7 +3263,7 @@
  int processor_id = Atomic::load(&mapping[apic_id]);

  while (processor_id < 0) {
-    if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
+    if (Atomic::cmpxchg(&mapping[apic_id], -1, -2) == -1) {
      Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
    }
    processor_id = Atomic::load(&mapping[apic_id]);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/linux/os_linux.cpp
--- a/src/hotspot/os/linux/os_linux.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -2752,7 +2752,7 @@
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/posix/os_posix.cpp
--- a/src/hotspot/os/posix/os_posix.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/posix/os_posix.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -1900,7 +1900,7 @@
  // atomically decrement _event
  for (;;) {
    v = _event;
-    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");

@@ -1940,7 +1940,7 @@
  // atomically decrement _event
  for (;;) {
    v = _event;
-    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");
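
[Review note, illustration only] The pending_signals loops above and the _event/_Event decrements share one idiom: read the current value, cmpxchg a derived value back, and succeed only if nothing changed in between. Sketch (hypothetical helper name):

    // Claim one pending signal from a slot; returns true only for the thread
    // whose decrement actually landed.
    static bool try_take_one(volatile jint* slot) {
      jint n = *slot;
      return n > 0 && Atomic::cmpxchg(slot, n, n - 1) == n;
    }
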
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/solaris/os_solaris.cpp
--- a/src/hotspot/os/solaris/os_solaris.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/solaris/os_solaris.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -1024,7 +1024,7 @@
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
-  const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
+  const hrtime_t obsv = Atomic::cmpxchg(&max_hrtime, prev, now);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
@@ -1984,7 +1984,7 @@
  while (true) {
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
@@ -4710,7 +4710,7 @@
  int v;
  for (;;) {
    v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
@@ -4748,7 +4748,7 @@
  int v;
  for (;;) {
    v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/windows/os_windows.cpp
--- a/src/hotspot/os/windows/os_windows.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/windows/os_windows.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -2096,7 +2096,7 @@
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
-      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
@@ -3751,7 +3751,7 @@
    if (what != EPT_THREAD) {
      // Atomically set process_exiting before the critical section
      // to increase the visibility between racing threads.
-      Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
+      Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
    }
    EnterCriticalSection(&crit_sect);

@@ -5136,7 +5136,7 @@
  int v;
  for (;;) {
    v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;
@@ -5198,7 +5198,7 @@
  int v;
  for (;;) {
    v = _Event;
-    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os/windows/threadCritical_windows.cpp
--- a/src/hotspot/os/windows/threadCritical_windows.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os/windows/threadCritical_windows.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -56,7 +56,7 @@
  if (lock_owner != current_thread) {
    // Grab the lock before doing anything.
-    while (Atomic::cmpxchg(0, &lock_count, -1) != -1) {
+    while (Atomic::cmpxchg(&lock_count, -1, 0) != -1) {
      if (initialized) {
        DWORD ret = WaitForSingleObject(lock_event,  INFINITE);
        assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -232,9 +232,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));

@@ -302,9 +302,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));

@@ -352,9 +352,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));

diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -64,9 +64,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
@@ -120,9 +120,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }

 template<>
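
[Review note, illustration only] Each platform file repeats the same mechanical reordering of PlatformCmpxchg::operator(). For the zero ports below the new order also lines up with the GCC builtin, so the body becomes a pass-through; a sketch of that shape (modeled on the bsd_zero/linux_zero hunks, types simplified):

    template<typename T>
    inline T cas(T volatile* dest, T compare_value, T exchange_value) {
      // __sync_val_compare_and_swap already takes (dest, compare, exchange)
      return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    }
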
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s    Mon Nov 25 12:33:15 2019 +0100
@@ -633,9 +633,9 @@
        ret


-        # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+        # Support for int64_t Atomic::cmpxchg(int64_t compare_value,
         #                                     volatile int64_t* dest,
-        #                                     int64_t compare_value)
+        #                                     int64_t exchange_value)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -665,4 +665,3 @@
        movl     8(%esp), %eax   # dest
        fistpll   (%eax)
        ret
-
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -239,16 +239,16 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
 #else
 #ifdef M68K
-  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
 #else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 #endif // M68K
@@ -257,9 +257,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -57,9 +57,9 @@

 template<size_t byte_size>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
-                                                        T volatile* dest,
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                         T compare_value,
+                                                        T exchange_value,
                                                         atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   if (order == memory_order_relaxed) {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -119,22 +119,22 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
-  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
 }

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
 }

 #endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
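
[Review note, illustration only] The aarch64 variant above is the only one that inspects the memory-order argument. A sketch of that dispatch with the __atomic builtin (assumption: the non-relaxed path is modeled here as seq_cst; HotSpot's real conservative path is implemented per CPU):

    template<typename T>
    inline T cas_with_order(T volatile* dest, T compare_value, T exchange_value,
                            atomic_memory_order order) {
      T expected = compare_value;
      int mo = (order == memory_order_relaxed) ? __ATOMIC_RELAXED : __ATOMIC_SEQ_CST;
      // On failure 'expected' is overwritten with the observed value, so this
      // always returns what was actually seen at *dest.
      __atomic_compare_exchange(dest, &expected, &exchange_value,
                                /*weak*/ false, mo, mo);
      return expected;
    }
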
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -232,9 +232,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));

@@ -302,9 +302,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));

@@ -352,9 +352,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));

diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -289,9 +289,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T cmp_val,
+                                                T xchg_val,
                                                 atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
@@ -313,9 +313,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T cmp_val,
+                                                T xchg_val,
                                                 atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -124,9 +124,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv;
@@ -140,9 +140,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -64,9 +64,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile ("lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ("lock cmpxchgl %1,(%3)"
@@ -120,9 +120,9 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }

 template<>
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_x86/linux_x86_32.s
--- a/src/hotspot/os_cpu/linux_x86/linux_x86_32.s    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/linux_x86_32.s    Mon Nov 25 12:33:15 2019 +0100
@@ -1,4 +1,4 @@
-# 
+#
 # Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
@@ -19,15 +19,15 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-# 
+#

-        
+
        # NOTE WELL!  The _Copy functions are called directly
        # from server-compiler-generated code via CallLeafNoFP,
        # which means that they *must* either not use floating
        # point or use it in the same manner as does the server
        # compiler.
-        
+
        .globl _Copy_conjoint_bytes
        .globl _Copy_arrayof_conjoint_bytes
        .globl _Copy_conjoint_jshorts_atomic
@@ -174,7 +174,7 @@
        leal     -1(%esi,%ecx),%eax   # from + count - 1
        jbe      acb_CopyRight
        cmpl     %eax,%edi
-       jbe      acb_CopyLeft
+       jbe      acb_CopyLeft
        # copy from low to high
 acb_CopyRight:
        cmpl     $3,%ecx
@@ -262,7 +262,7 @@
        leal     -2(%esi,%ecx,2),%eax # from + count*2 - 2
        jbe      cs_CopyRight
        cmpl     %eax,%edi
-       jbe      cs_CopyLeft
+       jbe      cs_CopyLeft
        # copy from low to high
 cs_CopyRight:
        # align source address at dword address boundary
@@ -283,7 +283,7 @@
        jbe      2f                   # <= 32 dwords
        # copy aligned dwords
        rep;     smovl
-       jmp      4f
+       jmp      4f
        # copy aligned dwords
 2:     subl     %esi,%edi
        .p2align 4,,15
@@ -349,7 +349,7 @@
        leal     -2(%esi,%ecx,2),%eax # from + count*2 - 2
        jbe      acs_CopyRight
        cmpl     %eax,%edi
-       jbe      acs_CopyLeft
+       jbe      acs_CopyLeft
 acs_CopyRight:
        movl     %ecx,%eax    # word count
        sarl     %ecx         # dword count
@@ -358,10 +358,10 @@
        jbe      2f           # <= 32 dwords
        # copy aligned dwords
        rep;     smovl
-       jmp      4f
+       jmp      4f
        # copy aligned dwords
        .space 5
-2:     subl     %esi,%edi
+2:     subl     %esi,%edi
        .p2align 4,,15
 3:     movl     (%esi),%edx
        movl     %edx,(%edi,%esi,1)
@@ -428,7 +428,7 @@
        leal     -4(%esi,%ecx,4),%eax # from + count*4 - 4
        jbe      ci_CopyRight
        cmpl     %eax,%edi
-       jbe      ci_CopyLeft
+       jbe      ci_CopyLeft
 ci_CopyRight:
        cmpl     $32,%ecx
        jbe      2f           # <= 32 dwords
@@ -471,7 +471,7 @@
        popl     %edi
        popl     %esi
        ret
-        
+
        # Support for void Copy::conjoint_jlongs_atomic(jlong* from,
        #                                               jlong* to,
        #                                               size_t count)
@@ -537,7 +537,7 @@
        je       5f
        cmpl     $33,%ecx
        jae      3f
-1:     subl     %esi,%edi
+1:     subl     %esi,%edi
        .p2align 4,,15
 2:     movl     (%esi),%edx
        movl     %edx,(%edi,%esi,1)
@@ -545,7 +545,7 @@
        subl     $1,%ecx
        jnz      2b
        addl     %esi,%edi
-       jmp      5f
+       jmp      5f
 3:     smovl # align to 8 bytes, we know we are 4 byte aligned to start
        subl     $1,%ecx
 4:     .p2align 4,,15
@@ -612,9 +612,9 @@
        ret


-        # Support for jlong Atomic::cmpxchg(jlong exchange_value,
-        #                                   volatile jlong* dest,
-        #                                   jlong compare_value)
+        # Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+        #                                   jlong compare_value,
+        #                                   jlong exchange_value)
         #
         .p2align 4,,15
        .type    _Atomic_cmpxchg_long,@function
@@ -643,4 +643,3 @@
        movl     8(%esp), %eax   # dest
        fistpll   (%eax)
        ret
-
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -93,9 +93,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -103,9 +103,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
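
[Review note, illustration only] The solaris_sparc file that follows derives add_and_fetch and xchg from cmpxchg with the standard retry loop; the same shape works for any read-modify-write. Sketch:

    // Generic fetch-add built from cmpxchg; D is the value type.
    template<typename D>
    inline D add_and_fetch(D volatile* dest, D add_value) {
      D old_value = *dest;
      while (true) {
        D new_value = old_value + add_value;
        D result = Atomic::cmpxchg(dest, old_value, new_value);
        if (result == old_value) return new_value;  // our update landed
        old_value = result;  // someone else won; retry from the observed value
      }
    }
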
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -35,7 +35,7 @@
   D old_value = *dest;
   while (true) {
     D new_value = old_value + add_value;
-    D result = cmpxchg(new_value, dest, old_value);
+    D result = cmpxchg(dest, old_value, new_value);
     if (result == old_value) break;
     old_value = result;
   }
@@ -64,7 +64,7 @@
   STATIC_ASSERT(8 == sizeof(T));
   T old_value = *dest;
   while (true) {
-    T result = cmpxchg(exchange_value, dest, old_value);
+    T result = cmpxchg(dest, old_value, exchange_value);
     if (result == old_value) break;
     old_value = result;
   }
@@ -77,9 +77,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv;
@@ -93,9 +93,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -104,9 +104,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   return PrimitiveConversions::cast<T>(
@@ -117,9 +117,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
@@ -130,9 +130,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il
--- a/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il    Mon Nov 25 12:33:15 2019 +0100
@@ -81,27 +81,27 @@
       movq     %rdi, %rax
   .end

-  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
-  //                                   volatile jbyte *dest,
-  //                                   jbyte compare_value)
+  // Support for jbyte Atomic::cmpxchg(volatile jbyte *dest,
+  //                                   jbyte compare_value,
+  //                                   jbyte exchange_value)
   .inline _Atomic_cmpxchg_byte,3
       movb     %dl, %al      // compare_value
       lock
       cmpxchgb %dil, (%rsi)
   .end

-  // Support for jint Atomic::cmpxchg(jint exchange_value,
-  //                                  volatile jint *dest,
-  //                                  jint compare_value)
+  // Support for jint Atomic::cmpxchg(volatile jint *dest,
+  //                                  int compare_value,
+  //                                  jint exchange_value)
   .inline _Atomic_cmpxchg,3
       movl     %edx, %eax    // compare_value
       lock
       cmpxchgl %edi, (%rsi)
   .end

-  // Support for jlong Atomic::cmpxchg(jlong exchange_value,
-  //                                   volatile jlong* dest,
-  //                                   jlong compare_value)
+  // Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+  //                                   jlong compare_value,
+  //                                   jlong exchange_value)
   .inline _Atomic_cmpxchg_long,3
       movq     %rdx, %rax    // compare_value
       lock
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -91,15 +91,15 @@

 #undef DEFINE_STUB_XCHG

-#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
-  template<>                                                               \
-  template<typename T>                                                     \
-  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
-                                                         T volatile* dest, \
-                                                         T compare_value,  \
+#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
+  template<>                                                               \
+  template<typename T>                                                     \
+  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
+                                                         T compare_value,  \
+                                                         T exchange_value, \
                                                          atomic_memory_order order) const { \
-    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
-    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
+    return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
   }

 DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
@@ -141,9 +141,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
@@ -157,9 +157,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
@@ -173,9 +173,9 @@

 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   int32_t ex_lo  = (int32_t)exchange_value;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/aot/aotCodeHeap.cpp
--- a/src/hotspot/share/aot/aotCodeHeap.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -347,7 +347,7 @@
     AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id);
     assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
     _code_to_aot[code_id]._aot = aot; // Should set this first
-    if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+    if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
       _code_to_aot[code_id]._aot = NULL; // Clean
     } else { // success
       // Publish method
@@ -410,7 +410,7 @@
     AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
     assert(_code_to_aot[code_id]._aot  == NULL, "should be not initialized");
     _code_to_aot[code_id]._aot = aot;
-    if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+    if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
       fatal("stab '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
     }
     // Adjust code buffer boundaries only for stubs because they are last in the buffer.
@@ -721,7 +721,7 @@
   for (int i = 0; i < methods_cnt; ++i) {
     int code_id = indexes[i];
     // Invalidate aot code.
-    if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
+    if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, invalid) != not_set) {
       if (_code_to_aot[code_id]._state == in_use) {
         AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
         assert(aot != NULL, "aot should be set");
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/classfile/classLoaderData.cpp
--- a/src/hotspot/share/classfile/classLoaderData.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -273,7 +273,7 @@
       return;
     }
     int new_claim = old_claim & ~claim;
-    if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+    if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
       return;
     }
   }
@@ -286,7 +286,7 @@
       return false;
     }
     int new_claim = old_claim | claim;
-    if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+    if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
       return true;
     }
   }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/classfile/classLoaderDataGraph.cpp
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -676,7 +676,7 @@
   while (head != NULL) {
     Klass* next = next_klass_in_cldg(head);

-    Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
+    Klass* old_head = Atomic::cmpxchg(&_next_klass, head, next);

     if (old_head == head) {
       return head; // Won the CAS.
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/code/codeCache.cpp
--- a/src/hotspot/share/code/codeCache.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/code/codeCache.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -749,7 +749,7 @@
   for (;;) {
     ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
     entry->set_purge_list_next(purge_list_head);
-    if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
+    if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
       break;
     }
   }
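
[Review note, illustration only] The purge-list append above, and the list manipulation in compiledMethod.cpp/dependencyContext.cpp that follows, are the classic lock-free stack push: link the new node to the current head, then CAS the head. Sketch over a hypothetical Node type:

    struct Node { Node* _next; };

    static void push(Node* volatile* list, Node* n) {
      for (;;) {
        Node* head = Atomic::load(list);
        n->_next = head;                              // link before publishing
        if (Atomic::cmpxchg(list, head, n) == head) {
          return;                                     // head unchanged; n is published
        }                                             // else another push won; retry
      }
    }
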
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/code/compiledMethod.cpp
--- a/src/hotspot/share/code/compiledMethod.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -133,7 +133,7 @@
       // next pointers always point at live ExceptionCaches, that are not removed due
       // to concurrent ExceptionCache cleanup.
       ExceptionCache* next = ec->next();
-      if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
+      if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
         CodeCache::release_exception_cache(ec);
       }
       continue;
@@ -143,7 +143,7 @@
         new_entry->set_next(ec);
       }
     }
-    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
+    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
       return;
     }
   }
@@ -176,7 +176,7 @@
       // Try to clean head; this is contended by concurrent inserts, that
       // both lazily clean the head, and insert entries at the head. If
       // the CAS fails, the operation is restarted.
-      if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
+      if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
        prev = NULL;
        curr = exception_cache_acquire();
        continue;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/code/dependencyContext.cpp
--- a/src/hotspot/share/code/dependencyContext.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/code/dependencyContext.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -101,7 +101,7 @@
   for (;;) {
     nmethodBucket* head = Atomic::load(_dependency_context_addr);
     new_head->set_next(head);
-    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
+    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
       break;
     }
   }
@@ -124,7 +124,7 @@
   for (;;) {
     nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
     b->set_purge_list_next(purge_list_head);
-    if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
+    if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
       break;
     }
   }
@@ -272,7 +272,7 @@
   if (last_cleanup >= cleaning_epoch) {
     return false;
   }
-  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
+  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
 }

 // Retrieve the first nmethodBucket that has a dependent that does not correspond to
@@ -291,7 +291,7 @@
       // Unstable load of head w.r.t. head->next
       continue;
     }
-    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
+    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
       // Release is_unloading entries if unlinking was claimed
       DependencyContext::release(head);
     }
@@ -345,7 +345,7 @@
       // Unstable load of next w.r.t. next->next
       continue;
    }
-    if (Atomic::cmpxchg(next_next, &_next, next) == next) {
+    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/code/nmethod.cpp
--- a/src/hotspot/share/code/nmethod.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/code/nmethod.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -1150,7 +1150,7 @@
       // Ensure monotonicity of transitions.
       return false;
     }
-    if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
+    if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) {
       return true;
     }
   }
@@ -1849,7 +1849,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

  if ((_oops_do_mark_link == NULL) &&
-      (Atomic::replace_if_null(mark_link(this, claim_weak_request_tag), &_oops_do_mark_link))) {
+      (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
    oops_do_log_change("oops_do, mark weak request");
    return true;
  }
@@ -1863,7 +1863,7 @@
 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

-  oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_done_tag), &_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag));
+  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
   if (old_next == NULL) {
     oops_do_log_change("oops_do, mark strong done");
   }
@@ -1874,7 +1874,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
   assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");

-  oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_request_tag), &_oops_do_mark_link, next);
+  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
   if (old_next == next) {
     oops_do_log_change("oops_do, mark strong request");
   }
@@ -1885,7 +1885,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
   assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");

-  oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(extract_nmethod(next), claim_strong_done_tag), &_oops_do_mark_link, next);
+  oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
   if (old_next == next) {
     oops_do_log_change("oops_do, mark weak done -> mark strong done");
     return true;
@@ -1906,7 +1906,7 @@
     old_head = this;
   }
   // Try to install end of list and weak done tag.
-  if (Atomic::cmpxchg(mark_link(old_head, claim_weak_done_tag), &_oops_do_mark_link, mark_link(this, claim_weak_request_tag)) == mark_link(this, claim_weak_request_tag)) {
+  if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
     oops_do_log_change("oops_do, mark weak done");
     return NULL;
   } else {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/compiler/compileBroker.hpp
--- a/src/hotspot/share/compiler/compileBroker.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/compiler/compileBroker.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -335,7 +335,7 @@
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   static bool set_should_compile_new_jobs(jint new_state) {
     // Return success if the current caller set it
-    jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+    jint old = Atomic::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state);
     bool success = (old == (1-new_state));
     if (success) {
       if (new_state == run_compilation) {
@@ -359,7 +359,7 @@
   static void handle_full_code_cache(int code_blob_type);
   // Ensures that warning is only printed once.
   static bool should_print_compiler_warning() {
-    jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
+    jint old = Atomic::cmpxchg(&_print_compilation_warning, 0, 1);
     return old == 0;
   }
   // Return total compilation ticks
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/epsilon/epsilonHeap.cpp
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -156,7 +156,7 @@
   // Allocation successful, update counters
   {
     size_t last = _last_counter_update;
-    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
+    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
       _monitoring_support->update_counters();
     }
   }
@@ -164,7 +164,7 @@
   // ...and print the occupancy line, if needed
   {
     size_t last = _last_heap_print;
-    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
+    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
       print_heap_info(used);
       print_metaspace_info();
     }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -170,7 +170,7 @@
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
   for (;;) {
     table->_purge_next = _purge_list;
-    G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
+    G1CodeRootSetTable* old = Atomic::cmpxchg(&_purge_list, table->_purge_next, table);
     if (old == table->_purge_next) {
       break;
     }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1CollectedHeap.cpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -3377,7 +3377,7 @@
     BufferNode* next = Atomic::load(&_nodes);
     while (next != NULL) {
       BufferNode* node = next;
-      next = Atomic::cmpxchg(node->next(), &_nodes, node);
+      next = Atomic::cmpxchg(&_nodes, node, node->next());
       if (next == node) {
         cl->apply_to_buffer(node, buffer_size, worker_id);
         next = node->next();
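
[Review note, illustration only] set_should_compile_new_jobs(), should_print_compiler_warning() and the epsilon counter updates above are all one-shot/claim flags: only the thread whose CAS actually flips the value acts. Sketch:

    // Returns true for exactly one caller: the one that flips 0 -> 1.
    static bool claim_once(volatile jint* flag) {
      return Atomic::cmpxchg(flag, 0, 1) == 0;
    }
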
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -1906,7 +1906,7 @@
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

     // Is the gap between reading the finger and doing the CAS too long?
-    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
+    HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
     if (res == finger && curr_region != NULL) {
       // we succeeded
       HeapWord* bottom = curr_region->bottom();
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1FreeIdSet.cpp
--- a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -79,7 +79,7 @@
     index = head_index(old_head);
     assert(index < _size, "invariant");
     uintx new_head = make_head(_next[index], old_head);
-    new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+    new_head = Atomic::cmpxchg(&_head, old_head, new_head);
     if (new_head == old_head) break;
     old_head = new_head;
   }
@@ -95,7 +95,7 @@
   while (true) {
     _next[index] = head_index(old_head);
     uintx new_head = make_head(index, old_head);
-    new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+    new_head = Atomic::cmpxchg(&_head, old_head, new_head);
     if (new_head == old_head) break;
     old_head = new_head;
   }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1HotCardCache.cpp
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -78,9 +78,9 @@
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
-                                            &_hot_cache[masked_index],
-                                            current_ptr);
+  CardValue* previous_ptr = Atomic::cmpxchg(&_hot_cache[masked_index],
+                                            current_ptr,
+                                            card_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }

diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1ParallelCleaning.cpp
--- a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -39,7 +39,7 @@
     return false;
   }

-  return Atomic::cmpxchg(1, &_cleaning_claimed, 0) == 0;
+  return Atomic::cmpxchg(&_cleaning_claimed, 0, 1) == 0;
 }

 void JVMCICleaningTask::work(bool unloading_occurred) {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/g1RemSet.cpp
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -177,7 +177,7 @@
       return;
     }

-    bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
+    bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
     if (marked_as_dirty) {
       uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
       _buffer[allocated] = region;
@@ -437,7 +437,7 @@
     if (_collection_set_iter_state[region]) {
       return false;
     }
-    return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false);
+    return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
   }

   bool has_cards_to_scan(uint region) {
@@ -1137,7 +1137,7 @@
     if (_initial_evacuation &&
         p->fast_reclaim_humongous_candidates() > 0 &&
         !_fast_reclaim_handled &&
-        !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {
+        !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {

       G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/heapRegion.inline.hpp
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -61,7 +61,7 @@
   size_t want_to_allocate = MIN2(available, desired_word_size);
   if (want_to_allocate >= min_word_size) {
     HeapWord* new_top = obj + want_to_allocate;
-    HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
+    HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top);
     // result can be one of two:
     //  the old top value: the exchange succeeded
     //  otherwise: the new value of the top is returned.
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/heapRegionManager.cpp
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -609,6 +609,6 @@

 bool HeapRegionClaimer::claim_region(uint region_index) {
   assert(region_index < _n_regions, "Invalid index.");
-  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
+  uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
   return old_val == Unclaimed;
 }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/heapRegionRemSet.cpp
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -46,7 +46,7 @@
     PerRegionTable* fl = _free_list;
     while (fl != NULL) {
       PerRegionTable* nxt = fl->next();
-      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, nxt);
       if (res == fl) {
         fl->init(hr, true);
         return fl;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/g1/heapRegionRemSet.hpp
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -229,7 +229,7 @@
     while (true) {
       PerRegionTable* fl = _free_list;
       last->set_next(fl);
-      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, prt);
       if (res == fl) {
         return;
       }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -864,7 +864,7 @@
   if (p != NULL) {
     HeapWord* cur_top, *cur_chunk_top = p + size;
     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
-      if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
+      if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
         break;
       }
     }
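
[Review note, illustration only] The heapRegion.inline.hpp hunk above and cas_allocate in mutableSpace.cpp (next file) are the same bump-pointer allocation CAS. Sketch, assuming HeapWord-sized units:

    static HeapWord* cas_allocate(HeapWord* volatile* top_addr, HeapWord* end, size_t size) {
      HeapWord* obj = Atomic::load(top_addr);
      while (obj + size <= end) {
        HeapWord* new_top = obj + size;
        HeapWord* result = Atomic::cmpxchg(top_addr, obj, new_top);
        if (result == obj) return obj;  // we own [obj, obj + size)
        obj = result;                   // top moved under us; retry from it
      }
      return NULL;                      // not enough space left
    }
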
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/parallel/mutableSpace.cpp
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp    Mon Nov 25 12:33:15 2019 +0100
@@ -194,7 +194,7 @@
     HeapWord* obj = top();
     if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
-      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
@@ -213,7 +213,7 @@
 // Try to deallocate previous allocation. Returns true upon success.
 bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   HeapWord* expected_top = obj + size;
-  return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
+  return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
 }

 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/parallel/psParallelCompact.hpp
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -584,7 +584,7 @@
 #ifdef ASSERT
   HeapWord* tmp = _highest_ref;
   while (addr > tmp) {
-    tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
+    tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
   }
 #endif  // #ifdef ASSERT
 }
@@ -592,7 +592,7 @@
 inline bool ParallelCompactData::RegionData::claim()
 {
   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
-  const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
+  const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
   return old == los;
 }

diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/barrierSet.hpp
--- a/src/hotspot/share/gc/shared/barrierSet.hpp    Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp    Mon Nov 25 12:33:15 2019 +0100
@@ -211,13 +211,13 @@
     }

     template <typename T>
-    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
-      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+    static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
+      return Raw::atomic_cmpxchg(addr, compare_value, new_value);
     }

     template <typename T>
-    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+    static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+      return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
     }

     template <typename T>
@@ -261,12 +261,12 @@
     }

     template <typename T>
-    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
-      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
+      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
     }

-    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
-      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
+    static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
+      return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
     }

     template <typename T>
@@ -297,8 +297,8 @@
     }

     template <typename T>
-    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
-      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
+      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
     }

     template <typename T>
entry_val, CardTableRS::clean_card_val()); if (res == entry_val) { break; } else { @@ -264,7 +264,7 @@ // Mark it as both cur and prev youngergen; card cleaning thread will // eventually remove the previous stuff. CardValue new_val = cur_youngergen_and_prev_nonclean_card; - CardValue res = Atomic::cmpxchg(new_val, entry, entry_val); + CardValue res = Atomic::cmpxchg(entry, entry_val, new_val); // Did the CAS succeed? if (res == entry_val) return; // Otherwise, retry, to see the new value. diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/modRefBarrierSet.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -79,7 +79,7 @@ template static void oop_store_in_heap(T* addr, oop value); template - static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value); + static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); template static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); @@ -98,8 +98,8 @@ return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr(base, offset), new_value); } - static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) { - return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr(base, offset), compare_value); + static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { + return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr(base, offset), compare_value, new_value); } }; }; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -67,10 +67,10 @@ template template inline oop ModRefBarrierSet::AccessBarrier:: -oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) { +oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { BarrierSetT *bs = barrier_set_cast(barrier_set()); bs->template write_ref_field_pre(addr); - oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); + oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value); if (result == compare_value) { bs->template write_ref_field_post(addr, new_value); } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/oopStorage.cpp --- a/src/hotspot/share/gc/shared/oopStorage.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/oopStorage.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -307,7 +307,7 @@ assert(!is_full_bitmask(allocated), "attempt to allocate from full block"); unsigned index = count_trailing_zeros(~allocated); uintx new_value = allocated | bitmask_for_index(index); - uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated); + uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value); if (fetched == allocated) { return get_pointer(index); // CAS succeeded; return entry for index. } @@ -595,7 +595,7 @@ while (true) { assert((releasing & ~old_allocated) == 0, "releasing unallocated entries"); uintx new_value = old_allocated ^ releasing; - uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated); + uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value); if (fetched == old_allocated) break; // Successful update. 
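// Sketch (illustrative, names taken from the surrounding OopStorage code):
// the claim-bit retry idiom in the new order. The cmpxchg return value is
// the freshly observed word, so the loop never re-reads the field separately.
uintx old_mask = Atomic::load(&_allocated_bitmask);
for (;;) {
  uintx new_mask = old_mask | bitmask_for_index(index);          // bit to claim
  uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_mask, new_mask);
  if (fetched == old_mask) break;                                // CAS succeeded; bit is set
  old_mask = fetched;                                            // lost the race; retry with new snapshot
}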
old_allocated = fetched; // Retry with updated bitmask. } @@ -614,12 +614,12 @@ // then someone else has made such a claim and the deferred update has not // yet been processed and will include our change, so we don't need to do // anything further. - if (Atomic::replace_if_null(this, &_deferred_updates_next)) { + if (Atomic::replace_if_null(&_deferred_updates_next, this)) { // Successfully claimed. Push, with self-loop for end-of-list. Block* head = owner->_deferred_updates; while (true) { _deferred_updates_next = (head == NULL) ? this : head; - Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head); + Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this); if (fetched == head) break; // Successful update. head = fetched; // Retry with updated head. } @@ -651,7 +651,7 @@ // Try atomic pop of block from list. Block* tail = block->deferred_updates_next(); if (block == tail) tail = NULL; // Handle self-loop end marker. - Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block); + Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail); if (fetched == block) break; // Update successful. block = fetched; // Retry with updated block. } @@ -825,7 +825,7 @@ // Set the request flag false and return its old value. // Needs to be atomic to avoid dropping a concurrent request. // Can't use Atomic::xchg, which may not support bool. - return Atomic::cmpxchg(false, &needs_cleanup_requested, true); + return Atomic::cmpxchg(&needs_cleanup_requested, true, false); } // Record that cleanup is needed, without notifying the Service thread. diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/parallelCleaning.cpp --- a/src/hotspot/share/gc/shared/parallelCleaning.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -94,7 +94,7 @@ } } - } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first); + } while (Atomic::cmpxchg(&_claimed_nmethod, first, last.method()) != first); } void CodeCacheUnloadingTask::work(uint worker_id) { @@ -130,7 +130,7 @@ return false; } - return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0; + return Atomic::cmpxchg(&_clean_klass_tree_claimed, 0, 1) == 0; } InstanceKlass* KlassCleaningTask::claim_next_klass() { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/ptrQueue.cpp --- a/src/hotspot/share/gc/shared/ptrQueue.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -197,7 +197,7 @@ bool BufferNode::Allocator::try_transfer_pending() { // Attempt to claim the lock. if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail. - Atomic::cmpxchg(true, &_transfer_lock, false)) { + Atomic::cmpxchg(&_transfer_lock, false, true)) { return false; } // Have the lock; perform the transfer. diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/referenceProcessor.cpp --- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -1031,7 +1031,7 @@ // The last ref must have its discovered field pointing to itself. oop next_discovered = (current_head != NULL) ? 
current_head : obj; - oop retest = HeapAccess::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); + oop retest = HeapAccess::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered); if (retest == NULL) { // This thread just won the right to enqueue the object. diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/satbMarkQueue.cpp --- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -136,7 +136,7 @@ value += 2; assert(value > old, "overflow"); if (value > threshold) value |= 1; - value = Atomic::cmpxchg(value, cfptr, old); + value = Atomic::cmpxchg(cfptr, old, value); } while (value != old); } @@ -149,7 +149,7 @@ old = value; value -= 2; if (value <= 1) value = 0; - value = Atomic::cmpxchg(value, cfptr, old); + value = Atomic::cmpxchg(cfptr, old, value); } while (value != old); } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/space.cpp --- a/src/hotspot/share/gc/shared/space.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/space.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -554,7 +554,7 @@ HeapWord* obj = top(); if (pointer_delta(end(), obj) >= size) { HeapWord* new_top = obj + size; - HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj); + HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/taskqueue.cpp --- a/src/hotspot/share/gc/shared/taskqueue.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/taskqueue.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -244,7 +244,7 @@ return true; } expected_value = current_offered; - } while ((current_offered = Atomic::cmpxchg(current_offered - 1, &_offered_termination, current_offered)) != expected_value); + } while ((current_offered = Atomic::cmpxchg(&_offered_termination, current_offered, current_offered - 1)) != expected_value); assert(_offered_termination < _n_threads, "Invariant"); return false; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/taskqueue.inline.hpp --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -321,7 +321,7 @@ template inline typename TaskQueueSuper::Age TaskQueueSuper::Age::cmpxchg(const Age new_age, const Age old_age) volatile { - return Atomic::cmpxchg(new_age._data, &_data, old_age._data); + return Atomic::cmpxchg(&_data, old_age._data, new_age._data); } template diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shared/workgroup.cpp --- a/src/hotspot/share/gc/shared/workgroup.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shared/workgroup.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -426,7 +426,7 @@ assert(t < _n_tasks, "bad task id."); uint old = _tasks[t]; if (old == 0) { - old = Atomic::cmpxchg(1u, &_tasks[t], 0u); + old = Atomic::cmpxchg(&_tasks[t], 0u, 1u); } bool res = old == 0; #ifdef ASSERT @@ -443,7 +443,7 @@ uint old; do { old = observed; - observed = Atomic::cmpxchg(old+1, &_threads_completed, old); + observed = Atomic::cmpxchg(&_threads_completed, old, old+1); } while (observed != old); // If this was the last thread checking in, clear the tasks. uint adjusted_thread_count = (n_threads == 0 ? 
1 : n_threads); @@ -471,7 +471,7 @@ bool SequentialSubTasksDone::try_claim_task(uint& t) { t = _n_claimed; while (t < _n_tasks) { - uint res = Atomic::cmpxchg(t+1, &_n_claimed, t); + uint res = Atomic::cmpxchg(&_n_claimed, t, t+1); if (res == t) { return true; } @@ -483,7 +483,7 @@ bool SequentialSubTasksDone::all_tasks_completed() { uint complete = _n_completed; while (true) { - uint res = Atomic::cmpxchg(complete+1, &_n_completed, complete); + uint res = Atomic::cmpxchg(&_n_completed, complete, complete+1); if (res == complete) { break; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -142,7 +142,7 @@ typedef BarrierSet::AccessBarrier Raw; template - static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value); + static oop oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value); template static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value); @@ -160,8 +160,8 @@ static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value); template - static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value); - static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value); + static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); + static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value); template static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); @@ -184,7 +184,7 @@ static void oop_store_not_in_heap(T* addr, oop value); template - static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value); + static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value); template static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -99,12 +99,12 @@ template template -inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) { +inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) { oop res; oop expected = compare_value; do { compare_value = expected; - res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); + res = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value); expected = res; } while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected))); if (res != NULL) { @@ -116,9 +116,9 @@ template template -inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value) { +inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value) { ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value); - oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value); + oop result = oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value); const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0; if 
(keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) && (result == compare_value) && @@ -130,15 +130,15 @@ template template -inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) { - oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, addr, compare_value); +inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { + oop result = oop_atomic_cmpxchg_in_heap_impl(addr, compare_value, new_value); keep_alive_if_weak(decorators, result); return result; } template -inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) { - oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, AccessInternal::oop_field_addr(base, offset), compare_value); +inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { + oop result = oop_atomic_cmpxchg_in_heap_impl(AccessInternal::oop_field_addr(base, offset), compare_value, new_value); keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), result); return result; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -147,7 +147,7 @@ resolved = _heap->evacuate_object(obj, _thread); } - Atomic::cmpxchg(resolved, p, obj); + Atomic::cmpxchg(p, obj, resolved); } } } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -81,7 +81,7 @@ int current = count++; if ((current & stride_mask) == 0) { process_block = (current >= _claimed_idx) && - (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current); + (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current); } if (process_block) { if (cb->is_alive()) { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -146,7 +146,7 @@ while(index < num_regions) { if (is_in(index)) { - jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); + jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1)); assert(cur >= (jint)saved_current, "Must move forward"); if (cur == saved_current) { assert(is_in(index), "Invariant"); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -59,7 +59,7 @@ } while (true) { - jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac); + jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, threads_in_evac + 1); if (other == threads_in_evac) { // Success: caller may safely enter evacuation 
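// Sketch (illustrative): the enter-protocol step here is a conditional
// fetch-and-add built from cmpxchg; with the new order the destination comes
// first, the expected count second, and the incremented count last.
jint n = Atomic::load_acquire(&_threads_in_evac);
while ((n & OOM_MARKER_MASK) == 0) {
  jint witness = Atomic::cmpxchg(&_threads_in_evac, n, n + 1);
  if (witness == n) break;   // entered; counter now includes this thread
  n = witness;               // counter changed underneath us; re-check OOM bit
}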
DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), true)); @@ -98,8 +98,7 @@ jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac); while (true) { - jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK, - &_threads_in_evac, threads_in_evac); + jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, (threads_in_evac - 1) | OOM_MARKER_MASK); if (other == threads_in_evac) { // Success: wait for other threads to get out of the protocol and return. wait_for_no_evac_threads(); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -131,20 +131,20 @@ inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) { assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr)); - return (oop) Atomic::cmpxchg(n, addr, c); + return (oop) Atomic::cmpxchg(addr, c, n); } inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop val = CompressedOops::encode(n); - return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c)); + return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, c, val)); } inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop cmp = CompressedOops::encode(c); narrowOop val = CompressedOops::encode(n); - return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp)); + return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, cmp, val)); } template diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -76,7 +76,7 @@ jlong current = os::javaTimeMillis(); jlong last = _last_sample_millis; if (current - last > ShenandoahRegionSamplingRate && - Atomic::cmpxchg(current, &_last_sample_millis, last) == last) { + Atomic::cmpxchg(&_last_sample_millis, last, current) == last) { ShenandoahHeap* heap = ShenandoahHeap::heap(); jlong status = 0; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -97,7 +97,7 @@ while(index < num_regions) { if (_set->is_in(index)) { - jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); + jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1)); assert(cur >= (jint)saved_current, "Must move forward"); if (cur == saved_current) { assert(_set->is_in(index), "Invariant"); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -210,7 +210,7 @@ return false; } new_val = cur - tax; - } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur); 
+ } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur); return true; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -74,7 +74,7 @@ if (is_set()) { return false; } - ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET); + ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET); return old == UNSET; // success } @@ -82,7 +82,7 @@ if (!is_set()) { return false; } - ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET); + ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET); return old == SET; // success } @@ -125,7 +125,7 @@ } ShenandoahSharedValue nv = ov | mask_val; - if (Atomic::cmpxchg(nv, &value, ov) == ov) { + if (Atomic::cmpxchg(&value, ov, nv) == ov) { // successfully set return; } @@ -143,7 +143,7 @@ } ShenandoahSharedValue nv = ov & ~mask_val; - if (Atomic::cmpxchg(nv, &value, ov) == ov) { + if (Atomic::cmpxchg(&value, ov, nv) == ov) { // successfully unset return; } @@ -221,7 +221,7 @@ T cmpxchg(T new_value, T expected) { assert (new_value >= 0, "sanity"); assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); - return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected); + return (T)Atomic::cmpxchg(&value, (ShenandoahSharedValue)expected, (ShenandoahSharedValue)new_value); } volatile ShenandoahSharedValue* addr_of() { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zBarrier.inline.hpp --- a/src/hotspot/share/gc/z/zBarrier.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -48,7 +48,7 @@ } // Heal - const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr); + const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr); if (prev_addr == addr) { // Success return; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zBarrierSet.hpp --- a/src/hotspot/share/gc/z/zBarrierSet.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zBarrierSet.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -70,8 +70,8 @@ static oop oop_load_in_heap_at(oop base, ptrdiff_t offset); template - static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value); - static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value); + static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); + static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value); template static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); @@ -91,7 +91,7 @@ static oop oop_load_not_in_heap(T* addr); template - static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value); + static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value); template static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zBarrierSet.inline.hpp --- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ 
b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -132,16 +132,16 @@ template template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { verify_decorators_present(); verify_decorators_absent(); ZBarrier::load_barrier_on_oop_field(addr); - return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value); + return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value); } template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { verify_decorators_present(); verify_decorators_absent(); @@ -150,7 +150,7 @@ // with the motivation that if you're doing Unsafe operations on a Reference.referent // field, then you're on your own anyway. ZBarrier::load_barrier_on_oop_field(field_addr(base, offset)); - return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value); + return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); } template @@ -222,11 +222,11 @@ template template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) { verify_decorators_present(); verify_decorators_absent(); - return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value); + return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value); } template diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zBitMap.inline.hpp --- a/src/hotspot/share/gc/z/zBitMap.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -55,7 +55,7 @@ inc_live = false; return false; } - const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val); + const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val); if (cur_val == old_val) { // Success const bm_word_t marked_mask = bit_mask(bit); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zForwarding.inline.hpp --- a/src/hotspot/share/gc/z/zForwarding.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -63,7 +63,7 @@ while (refcount > 0) { const uint32_t old_refcount = refcount; const uint32_t new_refcount = old_refcount + 1; - const uint32_t prev_refcount = Atomic::cmpxchg(new_refcount, &_refcount, old_refcount); + const uint32_t prev_refcount = Atomic::cmpxchg(&_refcount, old_refcount, new_refcount); if (prev_refcount == old_refcount) { return true; } @@ -139,7 +139,7 @@ const ZForwardingEntry old_entry; // Empty for (;;) { - const ZForwardingEntry prev_entry = Atomic::cmpxchg(new_entry, entries() + *cursor, old_entry); + const ZForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry); if (!prev_entry.populated()) { // Success return to_offset; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zLiveMap.cpp --- a/src/hotspot/share/gc/z/zLiveMap.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zLiveMap.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -58,7 +58,7 @@ seqnum != ZGlobalSeqNum; seqnum = 
Atomic::load_acquire(&_seqnum)) { if ((seqnum != seqnum_initializing) && - (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) { + (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) { // Reset marking information _live_bytes = 0; _live_objects = 0; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zMarkStack.inline.hpp --- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -121,7 +121,7 @@ for (;;) { decode_versioned_pointer(vstack, stack->next_addr(), &version); T* const new_vstack = encode_versioned_pointer(stack, version + 1); - T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack); + T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); if (prev_vstack == vstack) { // Success break; @@ -145,7 +145,7 @@ } T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1); - T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack); + T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); if (prev_vstack == vstack) { // Success return stack; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zMarkStackAllocator.cpp --- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -70,7 +70,7 @@ return 0; } - const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top); + const uintptr_t prev_top = Atomic::cmpxchg(&_top, top, new_top); if (prev_top == top) { // Success return top; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zMarkTerminate.inline.hpp --- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -49,7 +49,7 @@ } const uint new_nworking = nworking + 1; - const uint prev_nworking = Atomic::cmpxchg(new_nworking, nworking_stage, nworking); + const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking); if (prev_nworking == nworking) { // Success return true; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zObjectAllocator.cpp --- a/src/hotspot/share/gc/z/zObjectAllocator.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -97,7 +97,7 @@ retry: // Install new page - ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page); + ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page); if (prev_page != page) { if (prev_page == NULL) { // Previous page was retired, retry installing the new page diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zOopClosures.inline.hpp --- a/src/hotspot/share/gc/z/zOopClosures.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zOopClosures.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -96,7 +96,7 @@ // oop here again (the object would be strongly live and we would // not consider clearing such oops), so therefore we don't have an // ABA problem here. 
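// Sketch (illustrative): a compare-and-clear where losing the race is benign.
// With the new order the expected oop precedes the NULL being installed, and
// the returned witness value can simply be ignored.
oop witness = Atomic::cmpxchg(p, obj, oop(NULL));
// witness == obj: this thread cleared the slot.
// witness != obj: another thread updated it first; nothing more to do.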
- Atomic::cmpxchg(oop(NULL), p, obj); + Atomic::cmpxchg(p, obj, oop(NULL)); } } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zPage.inline.hpp --- a/src/hotspot/share/gc/z/zPage.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zPage.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -255,7 +255,7 @@ return 0; } - const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr); + const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top); if (prev_top == addr) { // Success return ZAddress::good(addr); @@ -299,7 +299,7 @@ return false; } - const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top); + const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top); if (prev_top == old_top) { // Success return true; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zRootsIterator.cpp --- a/src/hotspot/share/gc/z/zRootsIterator.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -91,7 +91,7 @@ template void ZSerialOopsDo::oops_do(ZRootsIteratorClosure* cl) { - if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) { + if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) { (_iter->*F)(cl); } } @@ -118,7 +118,7 @@ template void ZSerialWeakOopsDo::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) { - if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) { + if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) { (_iter->*F)(is_alive, cl); } } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/gc/z/zStat.cpp --- a/src/hotspot/share/gc/z/zStat.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/gc/z/zStat.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -772,7 +772,7 @@ } const uint64_t new_max = value; - const uint64_t prev_max = Atomic::cmpxchg(new_max, &cpu_data->_max, max); + const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max); if (prev_max == max) { // Success break; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/interpreter/bytecodeInterpreter.cpp --- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -2163,7 +2163,7 @@ HeapWord* compare_to = *Universe::heap()->top_addr(); HeapWord* new_top = compare_to + obj_size; if (new_top <= *Universe::heap()->end_addr()) { - if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { + if (Atomic::cmpxchg(Universe::heap()->top_addr(), compare_to, new_top) != compare_to) { goto retry; } result = (oop) compare_to; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/interpreter/oopMapCache.cpp --- a/src/hotspot/share/interpreter/oopMapCache.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/interpreter/oopMapCache.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -452,7 +452,7 @@ } bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) { - return Atomic::cmpxchg(entry, &_array[i % _size], old) == old; + return Atomic::cmpxchg(&_array[i % _size], old, entry) == old; } void OopMapCache::flush() { @@ -564,7 +564,7 @@ do { head = _old_entries; entry->_next = head; - success = Atomic::cmpxchg(entry, &_old_entries, head) == head; + success = Atomic::cmpxchg(&_old_entries, head, entry) == head; } while (!success); if (log_is_enabled(Debug, interpreter, oopmap)) { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp --- 
a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -93,7 +93,7 @@ ObjectSampler* ObjectSampler::acquire() { assert(is_created(), "invariant"); - while (Atomic::cmpxchg(1, &_lock, 0) == 1) {} + while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {} return _instance; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -46,7 +46,7 @@ do { compare_value = *dest; exchange_value = compare_value + 1; - } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value); + } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -62,7 +62,7 @@ do { const jbyte current = *dest; const jbyte new_value = op(current, bits); - if (Atomic::cmpxchg(new_value, dest, current) == current) { + if (Atomic::cmpxchg(dest, current, new_value) == current) { return; } } while (true); diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -418,7 +418,7 @@ static volatile int jfr_shutdown_lock = 0; static bool guard_reentrancy() { - return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0; + return Atomic::cmpxchg(&jfr_shutdown_lock, 0, 1) == 0; } class JavaThreadInVM : public StackObj { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp --- a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -87,7 +87,7 @@ const int current_msgs = Atomic::load(&_messages); // OR the new message const int exchange_value = current_msgs | new_messages; - const int result = Atomic::cmpxchg(exchange_value, &_messages, current_msgs); + const int result = Atomic::cmpxchg(&_messages, current_msgs, exchange_value); if (result == current_msgs) { return; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp --- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -104,7 +104,7 @@ const u1* JfrBuffer::concurrent_top() const { do { const u1* current_top = stable_top(); - if (Atomic::cmpxchg(MUTEX_CLAIM, &_top, current_top) == current_top) { + if (Atomic::cmpxchg(&_top, current_top, MUTEX_CLAIM) == current_top) { return current_top; } } while (true); @@ -128,13 +128,13 @@ const void* current_id; do { current_id = Atomic::load(&_identity); - } while (current_id != NULL || Atomic::cmpxchg(id, &_identity, current_id) != current_id); + } while (current_id != NULL 
|| Atomic::cmpxchg(&_identity, current_id, id) != current_id); } bool JfrBuffer::try_acquire(const void* id) { assert(id != NULL, "invariant"); const void* const current_id = Atomic::load(&_identity); - return current_id == NULL && Atomic::cmpxchg(id, &_identity, current_id) == current_id; + return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id; } void JfrBuffer::release() { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp --- a/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -34,7 +34,7 @@ do { compare_value = *dest; exchange_value = compare_value + value; - } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value); + } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } @@ -45,7 +45,7 @@ compare_value = *dest; assert(compare_value >= 1, "invariant"); exchange_value = compare_value - 1; - } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value); + } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } @@ -137,4 +137,3 @@ void JfrStorageControl::set_scavenge_threshold(size_t number_of_dead_buffers) { _scavenge_threshold = number_of_dead_buffers; } - diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/utilities/jfrAllocation.cpp --- a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -41,7 +41,7 @@ do { compare_value = *dest; exchange_value = compare_value + value; - } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value); + } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jfr/utilities/jfrTryLock.hpp --- a/src/hotspot/share/jfr/utilities/jfrTryLock.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jfr/utilities/jfrTryLock.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -36,7 +36,7 @@ bool _has_lock; public: - JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(1, lock, 0) == 0) {} + JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(lock, 0, 1) == 0) {} ~JfrTryLock() { if (_has_lock) { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jvmci/jvmciCompilerToVM.cpp --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -1586,7 +1586,7 @@ jint res = main_vm.AttachCurrentThread((void**)&hotspotEnv, NULL); _attached = res == JNI_OK; static volatile int report_attach_error = 0; - if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(1, &report_attach_error, 0) == 0) { + if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(&report_attach_error, 0, 1) == 0) { // Only report an attach error once jio_printf("Warning: attaching current thread to VM failed with %d (future attach errors are suppressed)\n", res); } @@ -1599,7 +1599,7 @@ extern struct JavaVM_ main_vm; jint res = main_vm.DetachCurrentThread(); static volatile int report_detach_error = 0; - if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(1, &report_detach_error, 0) == 0) { + if (res != JNI_OK && report_detach_error == 
0 && Atomic::cmpxchg(&report_detach_error, 0, 1) == 0) { // Only report an attach error once jio_printf("Warning: detaching current thread from VM failed with %d (future attach errors are suppressed)\n", res); } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jvmci/jvmciRuntime.cpp --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -917,7 +917,7 @@ JavaThread* THREAD = JavaThread::current(); static volatile int report_error = 0; - if (!report_error && Atomic::cmpxchg(1, &report_error, 0) == 0) { + if (!report_error && Atomic::cmpxchg(&report_error, 0, 1) == 0) { // Only report an error once tty->print_raw_cr(message); if (JVMCIENV != NULL) { @@ -1295,7 +1295,7 @@ static void fatal_exception_in_compile(JVMCIEnv* JVMCIENV, JavaThread* thread, const char* msg) { // Only report a fatal JVMCI compilation exception once static volatile int report_init_failure = 0; - if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) { + if (!report_init_failure && Atomic::cmpxchg(&report_init_failure, 0, 1) == 0) { tty->print_cr("%s:", msg); JVMCIENV->describe_pending_exception(true); } diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/jvmci/metadataHandleBlock.cpp --- a/src/hotspot/share/jvmci/metadataHandleBlock.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/jvmci/metadataHandleBlock.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -141,7 +141,7 @@ // but can't be put on the free list yet. The // HandleCleaner will set this to NULL and // put it on the free list. - jlong old_value = Atomic::cmpxchg((jlong) (ptr_tag), (jlong*)handle, (jlong) value); + jlong old_value = Atomic::cmpxchg((jlong*)handle, (jlong) value, (jlong) (ptr_tag)); if (old_value == (jlong) value) { // Success } else { diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/logging/logDecorations.cpp --- a/src/hotspot/share/logging/logDecorations.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/logging/logDecorations.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -49,7 +49,7 @@ char buffer[1024]; if (os::get_host_name(buffer, sizeof(buffer))) { host_name = os::strdup_check_oom(buffer); - const char* old_value = Atomic::cmpxchg(host_name, &_host_name, (const char*)NULL); + const char* old_value = Atomic::cmpxchg(&_host_name, (const char*)NULL, host_name); if (old_value != NULL) { os::free((void *) host_name); host_name = old_value; @@ -147,4 +147,3 @@ int written = jio_snprintf(pos, DecorationsBufferSize - (pos - _decorations_buffer), "%s", host_name()); ASSERT_AND_RETURN(written, pos) } - diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/memory/metaspace.cpp Mon Nov 25 12:33:15 2019 +0100 @@ -162,7 +162,7 @@ if (can_retry != NULL) { *can_retry = true; } - size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC); + size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value); if (old_capacity_until_GC != prev_value) { return false; diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/access.hpp --- a/src/hotspot/share/oops/access.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/oops/access.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -165,9 +165,9 @@ } template - static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { + static inline T atomic_cmpxchg_at(oop base, 
ptrdiff_t offset, T compare_value, T new_value) { verify_primitive_decorators(); - return AccessInternal::atomic_cmpxchg_at(new_value, base, offset, compare_value); + return AccessInternal::atomic_cmpxchg_at(base, offset, compare_value, new_value); } template @@ -191,12 +191,12 @@ } template - static inline T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { + static inline T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) { verify_heap_oop_decorators(); typedef typename AccessInternal::OopOrNarrowOop::type OopType; OopType new_oop_value = new_value; OopType compare_oop_value = compare_value; - return AccessInternal::atomic_cmpxchg_at(new_oop_value, base, offset, compare_oop_value); + return AccessInternal::atomic_cmpxchg_at(base, offset, compare_oop_value, new_oop_value); } template @@ -227,9 +227,9 @@ } template - static inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) { + static inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) { verify_primitive_decorators(); - return AccessInternal::atomic_cmpxchg(new_value, addr, compare_value); + return AccessInternal::atomic_cmpxchg(addr, compare_value, new_value); } template @@ -254,12 +254,12 @@ } template - static inline T oop_atomic_cmpxchg(T new_value, P* addr, T compare_value) { + static inline T oop_atomic_cmpxchg(P* addr, T compare_value, T new_value) { verify_oop_decorators(); typedef typename AccessInternal::OopOrNarrowOop::type OopType; OopType new_oop_value = new_value; OopType compare_oop_value = compare_value; - return AccessInternal::atomic_cmpxchg(new_oop_value, addr, compare_oop_value); + return AccessInternal::atomic_cmpxchg(addr, compare_oop_value, new_oop_value); } template diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/access.inline.hpp --- a/src/hotspot/share/oops/access.inline.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/oops/access.inline.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -107,16 +107,16 @@ template struct PostRuntimeDispatch: public AllStatic { template - static T access_barrier(T new_value, void* addr, T compare_value) { - return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast(addr), compare_value); + static T access_barrier(void* addr, T compare_value, T new_value) { + return GCBarrierType::atomic_cmpxchg_in_heap(reinterpret_cast(addr), compare_value, new_value); } - static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) { + static oop oop_access_barrier(void* addr, oop compare_value, oop new_value) { typedef typename HeapOopType::type OopType; if (HasDecorator::value) { - return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast(addr), compare_value); + return GCBarrierType::oop_atomic_cmpxchg_in_heap(reinterpret_cast(addr), compare_value, new_value); } else { - return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast(addr), compare_value); + return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(reinterpret_cast(addr), compare_value, new_value); } } }; @@ -183,12 +183,12 @@ template struct PostRuntimeDispatch: public AllStatic { template - static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) { - return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value); + static T access_barrier(oop base, ptrdiff_t offset, T compare_value, T new_value) { + return GCBarrierType::atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); } - static oop oop_access_barrier(oop 
new_value, oop base, ptrdiff_t offset, oop compare_value) { - return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value); + static oop oop_access_barrier(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { + return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); } }; @@ -309,17 +309,17 @@ } template - T RuntimeDispatch::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) { + T RuntimeDispatch::atomic_cmpxchg_init(void* addr, T compare_value, T new_value) { func_t function = BarrierResolver::resolve_barrier(); _atomic_cmpxchg_func = function; - return function(new_value, addr, compare_value); + return function(addr, compare_value, new_value); } template - T RuntimeDispatch::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) { + T RuntimeDispatch::atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value) { func_t function = BarrierResolver::resolve_barrier(); _atomic_cmpxchg_at_func = function; - return function(new_value, base, offset, compare_value); + return function(base, offset, compare_value, new_value); } template diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/accessBackend.hpp --- a/src/hotspot/share/oops/accessBackend.hpp Mon Nov 25 12:32:40 2019 +0100 +++ b/src/hotspot/share/oops/accessBackend.hpp Mon Nov 25 12:33:15 2019 +0100 @@ -102,12 +102,12 @@ struct AccessFunctionTypes { typedef T (*load_at_func_t)(oop base, ptrdiff_t offset); typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value); - typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value); + typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value); typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value); typedef T (*load_func_t)(void* addr); typedef void (*store_func_t)(void* addr, T value); - typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value); + typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value); typedef T (*atomic_xchg_func_t)(void* addr, T new_value); typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, @@ -293,12 +293,12 @@ template static typename EnableIf< HasDecorator::value, T>::type - atomic_cmpxchg_internal(T new_value, void* addr, T compare_value); + atomic_cmpxchg_internal(void* addr, T compare_value, T new_value); template static typename EnableIf< HasDecorator::value, T>::type - atomic_cmpxchg_internal(T new_value, void* addr, T compare_value); + atomic_cmpxchg_internal(void* addr, T compare_value, T new_value); template static typename EnableIf< @@ -312,14 +312,14 @@ template static inline typename EnableIf< !AccessInternal::PossiblyLockedAccess::value, T>::type - atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) { - return atomic_cmpxchg_internal(new_value, addr, compare_value); + atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) { + return atomic_cmpxchg_internal(addr, compare_value, new_value); } template static typename EnableIf< AccessInternal::PossiblyLockedAccess::value, T>::type - atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value); + atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value); template static inline typename EnableIf< @@ -345,8 +345,8 @@ } template - static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) { - return 
atomic_cmpxchg_maybe_locked(new_value, addr, compare_value); + static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) { + return atomic_cmpxchg_maybe_locked(addr, compare_value, new_value); } template @@ -370,9 +370,9 @@ static T oop_load_at(oop base, ptrdiff_t offset); template - static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value); + static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value); template - static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value); + static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value); template static T oop_atomic_xchg(void* addr, T new_value); @@ -390,8 +390,8 @@ } template - static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { - return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value); + static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) { + return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value); } template @@ -515,10 +515,10 @@ typedef typename AccessFunction::type func_t; static func_t _atomic_cmpxchg_func; - static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value); + static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value); - static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) { - return _atomic_cmpxchg_func(new_value, addr, compare_value); + static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) { + return _atomic_cmpxchg_func(addr, compare_value, new_value); } }; @@ -527,10 +527,10 @@ typedef typename AccessFunction::type func_t; static func_t _atomic_cmpxchg_at_func; - static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value); + static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value); - static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { - return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value); + static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) { + return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value); } }; @@ -782,56 +782,56 @@ template inline static typename EnableIf< HasDecorator::value && CanHardwireRaw::value, T>::type - atomic_cmpxchg(T new_value, void* addr, T compare_value) { + atomic_cmpxchg(void* addr, T compare_value, T new_value) { typedef RawAccessBarrier Raw; if (HasDecorator::value) { - return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); + return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value); } else { - return Raw::atomic_cmpxchg(new_value, addr, compare_value); + return Raw::atomic_cmpxchg(addr, compare_value, new_value); } } template inline static typename EnableIf< HasDecorator::value && !CanHardwireRaw::value, T>::type - atomic_cmpxchg(T new_value, void* addr, T compare_value) { + atomic_cmpxchg(void* addr, T compare_value, T new_value) { if (UseCompressedOops) { const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; - return PreRuntimeDispatch::atomic_cmpxchg(new_value, addr, compare_value); + return PreRuntimeDispatch::atomic_cmpxchg(addr, compare_value, new_value); } else { const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; - return PreRuntimeDispatch::atomic_cmpxchg(new_value, addr, compare_value); + return PreRuntimeDispatch::atomic_cmpxchg(addr, compare_value, new_value); } } template inline 
static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
       if (is_hardwired_primitive<decorators>()) {
         const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
       } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
       }
     }
 
     template <DecoratorSet decorators, typename T>
     inline static typename EnableIf<
       HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
     }
 
     template <DecoratorSet decorators, typename T>
     inline static typename EnableIf<
       !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
       if (is_hardwired_primitive<decorators>()) {
         const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
       } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
       }
     }
 
@@ -1018,30 +1018,30 @@
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
-    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
+    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+  inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
+  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value,
-                                         HeapWord* addr,
-                                         oop compare_value) {
+  inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
+                                         oop compare_value,
+                                         oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
   }
 
   template <DecoratorSet decorators, typename T>
@@ -1191,7 +1191,7 @@
   }
 
   template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+  inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
     verify_types<decorators, T>();
     typedef typename Decay<P>::type DecayedP;
     typedef typename Decay<T>::type DecayedT;
@@ -1200,13 +1200,13 @@
     const DecoratorSet expanded_decorators = DecoratorFixup<
       (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
       (MO_SEQ_CST | decorators) : decorators>::value;
-    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                            const_cast<DecayedP*>(addr),
-                                                            compare_decayed_value);
+    return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
+                                                            compare_decayed_value,
+                                                            new_decayed_value);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+  inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
     verify_types<decorators, T>();
     typedef typename Decay<T>::type DecayedT;
     DecayedT new_decayed_value = new_value;
@@ -1219,8 +1219,8 @@
     const DecoratorSet final_decorators = expanded_decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
-    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
-                                                                   offset, compare_decayed_value);
+    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
+                                                                   new_decayed_value);
   }
 
   template <DecoratorSet decorators, typename T>
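The shape of the change is the same at every call site: the destination pointer moves to the front, followed by the compare value and then the exchange value, which is also the order used by C11 atomic_compare_exchange and std::atomic. A minimal self-contained sketch of the new calling convention (modeled here with std::atomic, since HotSpot's Atomic::cmpxchg returns the old value rather than a bool; the free function below is an illustrative stand-in, not HotSpot code):

    #include <atomic>
    #include <cassert>

    // Stand-in with the post-patch parameter order: dest, compare, exchange.
    template <typename D>
    D cmpxchg(std::atomic<D>* dest, D compare_value, D exchange_value) {
      D observed = compare_value;
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;  // old value; equals compare_value iff the swap happened
    }

    int main() {
      std::atomic<int> word{0};
      // old order: cmpxchg(42, &word, 0); new order: cmpxchg(&word, 0, 42)
      int old = cmpxchg(&word, 0, 42);
      assert(old == 0 && word.load() == 42);
    }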
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/accessBackend.inline.hpp
--- a/src/hotspot/share/oops/accessBackend.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -85,20 +85,20 @@
 
 template <DecoratorSet decorators>
 template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
   Encoded encoded_new = encode(new_value);
   Encoded encoded_compare = encode(compare_value);
-  Encoded encoded_result = atomic_cmpxchg(encoded_new,
-                                          reinterpret_cast<Encoded*>(addr),
-                                          encoded_compare);
+  Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
+                                          encoded_compare,
+                                          encoded_new);
   return decode<T>(encoded_result);
 }
 
 template <DecoratorSet decorators>
 template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 }
 
 template <DecoratorSet decorators>
@@ -181,10 +181,10 @@
 template <DecoratorSet decorators>
 template <typename T>
 inline typename EnableIf<
   HasDecorator<decorators, MO_RELAXED>::value, T>::type
-RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
-  return Atomic::cmpxchg(new_value,
-                         reinterpret_cast<volatile T*>(addr),
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
+  return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                          compare_value,
+                         new_value,
                          memory_order_relaxed);
 }
 
@@ -192,10 +192,10 @@
 template <DecoratorSet decorators>
 template <typename T>
 inline typename EnableIf<
   HasDecorator<decorators, MO_SEQ_CST>::value, T>::type
-RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
-  return Atomic::cmpxchg(new_value,
-                         reinterpret_cast<volatile T*>(addr),
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
+  return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                          compare_value,
+                         new_value,
                          memory_order_conservative);
 }
 
@@ -232,9 +232,9 @@
 template <DecoratorSet decorators>
 template <typename T>
 inline typename EnableIf<
   AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<decorators>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
+RawAccessBarrier<decorators>::atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
   if (!AccessInternal::wide_atomic_needs_locking()) {
-    return atomic_cmpxchg_internal<decorators>(new_value, addr, compare_value);
+    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
   } else {
     AccessInternal::AccessLocker access_lock;
     volatile T* p = reinterpret_cast<volatile T*>(addr);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/constantPool.cpp
--- a/src/hotspot/share/oops/constantPool.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/constantPool.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -808,8 +808,9 @@
   // This doesn't deterministically get an error. So why do we save this?
   // We save this because jvmti can add classes to the bootclass path after
   // this error, so it needs to get the same error if the error is first.
-  jbyte old_tag = Atomic::cmpxchg((jbyte)error_tag,
-                                  (jbyte*)this_cp->tag_addr_at(which), (jbyte)tag.value());
+  jbyte old_tag = Atomic::cmpxchg((jbyte*)this_cp->tag_addr_at(which),
+                                  (jbyte)tag.value(),
+                                  (jbyte)error_tag);
   if (old_tag != error_tag && old_tag != tag.value()) {
     // MethodHandles and MethodType doesn't change to resolved version.
     assert(this_cp->tag_at(which).is_klass(), "Wrong tag value");
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/cpCache.cpp
--- a/src/hotspot/share/oops/cpCache.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/cpCache.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -159,7 +159,7 @@
   // sure that the final parameter size agrees with what was passed.
   if (_flags == 0) {
     intx newflags = (value & parameter_size_mask);
-    Atomic::cmpxchg(newflags, &_flags, (intx)0);
+    Atomic::cmpxchg(&_flags, (intx)0, newflags);
   }
   guarantee(parameter_size() == value,
             "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/instanceKlass.cpp
--- a/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -2200,7 +2200,7 @@
   if (impl != NULL && !impl->is_loader_alive()) {
     // NULL this field, might be an unloaded klass or NULL
     Klass* volatile* klass = adr_implementor();
-    if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
+    if (Atomic::cmpxchg(klass, impl, (Klass*)NULL) == impl) {
       // Successfully unlinking implementor.
       if (log_is_enabled(Trace, class, unload))  {
         ResourceMark rm;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/klass.cpp
--- a/src/hotspot/share/oops/klass.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/klass.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -436,7 +436,7 @@
     // Note that the prev_first_subklass is always alive, meaning no sibling_next links
     // are ever created to not alive klasses. This is an important invariant of the lock-free
     // cleaning protocol, that allows us to safely unlink dead klasses from the sibling list.
-    if (Atomic::cmpxchg(this, &super->_subklass, prev_first_subklass) == prev_first_subklass) {
+    if (Atomic::cmpxchg(&super->_subklass, prev_first_subklass, this) == prev_first_subklass) {
       return;
     }
   }
@@ -451,7 +451,7 @@
       return;
     }
    // Try to fix _subklass until it points at something not dead.
-    Atomic::cmpxchg(subklass->next_sibling(), &_subklass, subklass);
+    Atomic::cmpxchg(&_subklass, subklass, subklass->next_sibling());
  }
}
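The klass.cpp hunks above are two halves of one lock-free protocol on the subklass list: insertion CASes a new head in, and cleaning CASes a dead head out to its successor. A hedged sketch of the unlink step, with illustrative stand-in types rather than HotSpot's:

    #include <atomic>

    struct KNode {
      std::atomic<KNode*> next{nullptr};
      bool alive = true;
    };

    // If the current head is dead, try to swing the head pointer to its
    // successor. A failed CAS just means a racing insert or clean got there
    // first, and the caller re-examines the list.
    void try_unlink_dead_head(std::atomic<KNode*>& head) {
      KNode* h = head.load();
      if (h != nullptr && !h->alive) {
        head.compare_exchange_strong(h, h->next.load());
      }
    }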
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/method.cpp
--- a/src/hotspot/share/oops/method.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/method.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -569,7 +569,7 @@
 
 bool Method::init_method_counters(MethodCounters* counters) {
   // Try to install a pointer to MethodCounters, return true on success.
-  return Atomic::replace_if_null(counters, &_method_counters);
+  return Atomic::replace_if_null(&_method_counters, counters);
 }
 
 int Method::extra_stack_words() {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/methodData.cpp
--- a/src/hotspot/share/oops/methodData.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/methodData.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -896,7 +896,7 @@
   FailedSpeculation** cursor = failed_speculations_address;
   do {
     if (*cursor == NULL) {
-      FailedSpeculation* old_fs = Atomic::cmpxchg(fs, cursor, (FailedSpeculation*) NULL);
+      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
       if (old_fs == NULL) {
         // Successfully appended fs to end of the list
         return true;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/objArrayOop.cpp
--- a/src/hotspot/share/oops/objArrayOop.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/objArrayOop.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -36,7 +36,7 @@
   } else {
     offs = objArrayOopDesc::obj_at_offset<oop>(index);
   }
-  return HeapAccess<IS_ARRAY>::oop_atomic_cmpxchg_at(exchange_value, as_oop(), offs, compare_value);
+  return HeapAccess<IS_ARRAY>::oop_atomic_cmpxchg_at(as_oop(), offs, compare_value, exchange_value);
 }
 
 Klass* objArrayOopDesc::element_klass() {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/oop.inline.hpp
--- a/src/hotspot/share/oops/oop.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -73,12 +73,12 @@
 }
 
 markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
-  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
+  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(as_oop(), mark_offset_in_bytes(), old_mark.value(), new_mark.value());
   return markWord(v);
 }
 
 markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
-  return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
+  return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
 }
 
 void oopDesc::init_mark() {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/oops/symbol.cpp
--- a/src/hotspot/share/oops/symbol.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/oops/symbol.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -281,7 +281,7 @@
     } else if (refc == 0) {
       return false; // dead, can't revive.
     } else {
-      found = Atomic::cmpxchg(old_value + 1, &_length_and_refcount, old_value);
+      found = Atomic::cmpxchg(&_length_and_refcount, old_value, old_value + 1);
       if (found == old_value) {
         return true; // successfully updated.
       }
@@ -324,7 +324,7 @@
 #endif
       return;
     } else {
-      found = Atomic::cmpxchg(old_value - 1, &_length_and_refcount, old_value);
+      found = Atomic::cmpxchg(&_length_and_refcount, old_value, old_value - 1);
       if (found == old_value) {
         return; // successfully updated.
       }
@@ -348,7 +348,7 @@
       return;
     } else {
       int len = extract_length(old_value);
-      found = Atomic::cmpxchg(pack_length_and_refcount(len, PERM_REFCOUNT), &_length_and_refcount, old_value);
+      found = Atomic::cmpxchg(&_length_and_refcount, old_value, pack_length_and_refcount(len, PERM_REFCOUNT));
       if (found == old_value) {
         return; // successfully updated.
       }
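The symbol.cpp refcount updates are the textbook read-compute-CAS loop: snapshot the packed word, bail out on the sentinel states, otherwise CAS in the adjusted count and retry on interference. A simplified self-contained sketch (a bare counter; the real field packs length and refcount together):

    #include <atomic>
    #include <cstdint>

    // Increment only while the count is positive, so a symbol whose count
    // already dropped to zero (dead) is never revived.
    bool try_increment(std::atomic<uint32_t>& refcount) {
      uint32_t old_value = refcount.load();
      while (old_value != 0) {
        if (refcount.compare_exchange_weak(old_value, old_value + 1)) {
          return true;   // successfully updated
        }
        // old_value now holds the refreshed count; loop and retry
      }
      return false;      // dead, can't revive
    }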
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/opto/runtime.cpp
--- a/src/hotspot/share/opto/runtime.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/opto/runtime.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -1659,7 +1659,7 @@
     c->set_next(NULL);
     head = _named_counters;
     c->set_next(head);
-  } while (Atomic::cmpxchg(c, &_named_counters, head) != head);
+  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
   return c;
 }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/prims/jni.cpp
--- a/src/hotspot/share/prims/jni.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/prims/jni.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -273,7 +273,7 @@
     _name = elementName;
     uintx count = 0;
 
-    while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
+    while (Atomic::cmpxchg(&JNIHistogram_lock, 0, 1) != 0) {
       while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
         count +=1;
         if ( (WarnOnStalledSpinLock > 0)
@@ -3233,7 +3233,7 @@
     return false;
   }
 
-  if (Atomic::cmpxchg(1, &directBufferSupportInitializeStarted, 0) == 0) {
+  if (Atomic::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) {
     if (!lookupDirectBufferClasses(env)) {
       directBufferSupportInitializeFailed = 1;
       return false;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/prims/jvm.cpp
--- a/src/hotspot/share/prims/jvm.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/prims/jvm.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -233,7 +233,7 @@
     _name = elementName;
     uintx count = 0;
 
-    while (Atomic::cmpxchg(1, &JVMHistogram_lock, 0) != 0) {
+    while (Atomic::cmpxchg(&JVMHistogram_lock, 0, 1) != 0) {
       while (Atomic::load_acquire(&JVMHistogram_lock) != 0) {
         count +=1;
         if ( (WarnOnStalledSpinLock > 0)
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/prims/jvmtiRawMonitor.cpp
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -121,7 +121,7 @@
 
 void JvmtiRawMonitor::simple_enter(Thread* self) {
   for (;;) {
-    if (Atomic::replace_if_null(self, &_owner)) {
+    if (Atomic::replace_if_null(&_owner, self)) {
       return;
     }
 
@@ -133,7 +133,7 @@
     node._next = _entry_list;
     _entry_list = &node;
     OrderAccess::fence();
-    if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) {
+    if (_owner == NULL && Atomic::replace_if_null(&_owner, self)) {
       _entry_list = node._next;
       RawMonitor_lock->unlock();
       return;
@@ -322,10 +322,10 @@
       jt->SR_lock()->lock_without_safepoint_check();
     }
     // guarded by SR_lock to avoid racing with new external suspend requests.
-    contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL);
+    contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, jt);
     jt->SR_lock()->unlock();
   } else {
-    contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL);
+    contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, self);
   }
 
   if (contended == self) {
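The JNIHistogram_lock and JVMHistogram_lock sites above are hand-rolled spin locks: acquire by CASing 0 to 1, then spin on plain loads while the lock is held so the CAS is only retried once the lock looks free. A self-contained sketch of the same test-and-test-and-set pattern (illustrative class, not HotSpot's):

    #include <atomic>

    class SpinLock {
      std::atomic<int> _lock{0};
     public:
      void lock() {
        int expected = 0;
        while (!_lock.compare_exchange_weak(expected, 1,
                                            std::memory_order_acquire)) {
          expected = 0;
          // Spin read-only until the lock appears free, then re-CAS.
          while (_lock.load(std::memory_order_relaxed) != 0) { /* spin */ }
        }
      }
      void unlock() { _lock.store(0, std::memory_order_release); }
    };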
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/prims/unsafe.cpp
--- a/src/hotspot/share/prims/unsafe.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -903,7 +903,7 @@
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
   assert_field_offset_sane(p, offset);
-  oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+  oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
   return JNIHandles::make_local(env, res);
 } UNSAFE_END
 
@@ -911,10 +911,10 @@
   oop p = JNIHandles::resolve(obj);
   if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
-    return RawAccess<>::atomic_cmpxchg(x, addr, e);
+    return RawAccess<>::atomic_cmpxchg(addr, e, x);
   } else {
     assert_field_offset_sane(p, offset);
-    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+    return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
   }
 } UNSAFE_END
 
@@ -922,10 +922,10 @@
   oop p = JNIHandles::resolve(obj);
   if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
-    return RawAccess<>::atomic_cmpxchg(x, addr, e);
+    return RawAccess<>::atomic_cmpxchg(addr, e, x);
   } else {
     assert_field_offset_sane(p, offset);
-    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+    return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
   }
 } UNSAFE_END
 
@@ -934,7 +934,7 @@
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
   assert_field_offset_sane(p, offset);
-  oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+  oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
   return ret == e;
 } UNSAFE_END
 
@@ -942,10 +942,10 @@
   oop p = JNIHandles::resolve(obj);
   if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
-    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
+    return RawAccess<>::atomic_cmpxchg(addr, e, x) == e;
   } else {
     assert_field_offset_sane(p, offset);
-    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
+    return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x) == e;
   }
 } UNSAFE_END
 
@@ -953,10 +953,10 @@
   oop p = JNIHandles::resolve(obj);
   if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
-    return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
+    return RawAccess<>::atomic_cmpxchg(addr, e, x) == e;
   } else {
     assert_field_offset_sane(p, offset);
-    return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
+    return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x) == e;
  }
 } UNSAFE_END
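All of the Unsafe compare-and-set entry points above reduce to one identity: CAS returns the old value, and the operation succeeded exactly when that old value equals the expected one. A small stand-in sketch of that mapping:

    #include <atomic>

    // Equivalent of "return cmpxchg(addr, e, x) == e" in the hunks above,
    // expressed with std::atomic for a self-contained example.
    bool compare_and_set(std::atomic<int>* addr, int e, int x) {
      int old_value = e;
      addr->compare_exchange_strong(old_value, x);
      return old_value == e;
    }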
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/atomic.hpp
--- a/src/hotspot/share/runtime/atomic.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/atomic.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -141,10 +141,10 @@
   // value of *dest.  cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  template<typename T, typename D, typename U>
-  inline static D cmpxchg(T exchange_value,
-                          D volatile* dest,
+  template<typename D, typename U, typename T>
+  inline static D cmpxchg(D volatile* dest,
                           U compare_value,
+                          T exchange_value,
                           atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and NULL, and replaces *dest
@@ -152,8 +152,8 @@
   // the comparison succeeded and the exchange occurred.  This is
   // often used as part of lazy initialization, as a lock-free
   // alternative to the Double-Checked Locking Pattern.
-  template<typename T, typename D>
-  inline static bool replace_if_null(T* value, D* volatile* dest,
+  template<typename D, typename T>
+  inline static bool replace_if_null(D* volatile* dest, T* value,
                                      atomic_memory_order order = memory_order_conservative);
 
 private:
@@ -293,7 +293,7 @@
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformCmpxchg.
-  template<typename T, typename D, typename U, typename Enable = void>
+  template<typename D, typename U, typename T, typename Enable = void>
   struct CmpxchgImpl;
 
   // Platform-specific implementation of cmpxchg.  Support for sizes
@@ -306,11 +306,11 @@
   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
   //
   // Then
-  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
+  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, T, atomic_memory_order) const
+  //   T operator()(T volatile*, T, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
@@ -326,9 +326,9 @@
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T cmpxchg_using_helper(Fn fn,
-                                T exchange_value,
                                 T volatile* dest,
-                                T compare_value);
+                                T compare_value,
+                                T exchange_value);
 
   // Support platforms that do not provide Read-Modify-Write
   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
@@ -568,9 +568,9 @@
 template<size_t byte_size>
 struct Atomic::PlatformCmpxchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
@@ -579,9 +579,9 @@
 // in this file, near the other definitions related to cmpxchg.
 struct Atomic::CmpxchgByteUsingInt {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
@@ -745,22 +745,22 @@
                                  reinterpret_cast<Type volatile*>(dest)));
 }
 
-template<typename T, typename D, typename U>
-inline D Atomic::cmpxchg(T exchange_value,
-                         D volatile* dest,
+template<typename D, typename U, typename T>
+inline D Atomic::cmpxchg(D volatile* dest,
                          U compare_value,
+                         T exchange_value,
                          atomic_memory_order order) {
-  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
 }
 
-template<typename T, typename D>
-inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+template<typename D, typename T>
+inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order) {
   // Presently using a trivial implementation in terms of cmpxchg.
   // Consider adding platform support, to permit the use of compiler
   // intrinsics like gcc's __sync_bool_compare_and_swap.
   D* expected_null = NULL;
-  return expected_null == cmpxchg(value, dest, expected_null, order);
+  return expected_null == cmpxchg(dest, expected_null, value, order);
 }
 
 // Handle cmpxchg for integral and enum types.
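replace_if_null is the lazy-initialization idiom the comment above describes: build a candidate, publish it only if the slot is still NULL, and discard it if another thread won. A hedged sketch of a typical caller (illustrative types; compare the BoxCache::singleton hunks further down):

    #include <atomic>

    struct Cache { /* ... */ };

    Cache* get_singleton(std::atomic<Cache*>& slot) {
      Cache* existing = slot.load(std::memory_order_acquire);
      if (existing != nullptr) return existing;
      Cache* candidate = new Cache();
      Cache* expected = nullptr;
      if (slot.compare_exchange_strong(expected, candidate)) {
        return candidate;   // we installed it
      }
      delete candidate;     // lost the race; expected now holds the winner
      return expected;
    }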
@@ -771,12 +771,12 @@
   T, T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
-                                        dest,
+    return PlatformCmpxchg<sizeof(T)>()(dest,
                                         compare_value,
+                                        exchange_value,
                                         order);
   }
 };
@@ -790,21 +790,21 @@
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D, typename U>
+template<typename D, typename U, typename T>
 struct Atomic::CmpxchgImpl<
-  T*, D*, U*,
+  D*, U*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                     IsSame<typename RemoveCV<D>::type,
                            typename RemoveCV<U>::type>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                 atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
     // Don't care what the CV qualifiers for compare_value are,
     // but we need to match D* when calling platform support.
     D* old_value = const_cast<D*>(compare_value);
-    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
   }
 };
 
@@ -820,24 +820,24 @@
   T, T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      cmpxchg(Translator::decay(exchange_value),
-              reinterpret_cast<Decayed volatile*>(dest),
+      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
               Translator::decay(compare_value),
+              Translator::decay(exchange_value),
               order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::cmpxchg_using_helper(Fn fn,
-                                      T exchange_value,
                                       T volatile* dest,
-                                      T compare_value) {
+                                      T compare_value,
+                                      T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
@@ -846,9 +846,9 @@
 }
 
 template<typename T>
-inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
-                                                 T volatile* dest,
+inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
   uint8_t canon_exchange_value = exchange_value;
@@ -871,7 +871,7 @@
     // ... except for the one byte we want to update
     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
-    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
     if (res == cur) break;      // success
 
     // at least one byte in the int changed value, so update
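CmpxchgByteUsingInt above is the fallback for platforms without a 1-byte CAS: it CASes the aligned 32-bit word containing the byte, splicing the new byte into a copy and retrying while the neighboring bytes churn. A sketch of the idea (not strictly portable C++, since it type-puns the word containing the byte):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    uint8_t cmpxchg_byte(volatile uint8_t* dest, uint8_t compare_value,
                         uint8_t exchange_value) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
      size_t offset = addr % sizeof(uint32_t);
      auto* aligned = reinterpret_cast<std::atomic<uint32_t>*>(addr - offset);
      uint32_t cur = aligned->load();
      for (;;) {
        uint8_t cur_byte = reinterpret_cast<uint8_t*>(&cur)[offset];
        if (cur_byte != compare_value) return cur_byte;  // compare failed
        uint32_t new_word = cur;   // same word ...
        // ... except for the one byte we want to update
        reinterpret_cast<uint8_t*>(&new_word)[offset] = exchange_value;
        if (aligned->compare_exchange_weak(cur, new_word)) {
          return compare_value;    // success: the old byte matched
        }
        // cur was refreshed by the failed CAS; another byte moved, retry
      }
    }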
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/deoptimization.cpp
--- a/src/hotspot/share/runtime/deoptimization.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/deoptimization.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -869,7 +869,7 @@
   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
     if (_singleton == NULL) {
       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
-      if (!Atomic::replace_if_null(s, &_singleton)) {
+      if (!Atomic::replace_if_null(&_singleton, s)) {
         delete s;
       }
     }
@@ -923,7 +923,7 @@
   static BooleanBoxCache* singleton(Thread* thread) {
     if (_singleton == NULL) {
       BooleanBoxCache* s = new BooleanBoxCache(thread);
-      if (!Atomic::replace_if_null(s, &_singleton)) {
+      if (!Atomic::replace_if_null(&_singleton, s)) {
         delete s;
       }
     }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/interfaceSupport.cpp
--- a/src/hotspot/share/runtime/interfaceSupport.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -87,7 +87,7 @@
     _name = elementName;
     uintx count = 0;
 
-    while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
+    while (Atomic::cmpxchg(&RuntimeHistogram_lock, 0, 1) != 0) {
       while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
         count +=1;
         if ( (WarnOnStalledSpinLock > 0)
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/objectMonitor.cpp
--- a/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -245,7 +245,7 @@
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
-  void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+  void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
   if (cur == NULL) {
     assert(_recursions == 0, "invariant");
     return;
@@ -403,7 +403,7 @@
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::replace_if_null(Self, &_owner)) {
+  if (Atomic::replace_if_null(&_owner, Self)) {
     assert(_recursions == 0, "invariant");
     return 1;
   }
@@ -480,7 +480,7 @@
   ObjectWaiter * nxt;
   for (;;) {
     node._next = nxt = _cxq;
-    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
+    if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;
 
     // Interference - the CAS failed because _cxq changed.  Just retry.
     // As an optional optimization we retry the lock.
@@ -518,7 +518,7 @@
     if (nxt == NULL && _EntryList == NULL) {
       // Try to assume the role of responsible thread for the monitor.
       // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
-      Atomic::replace_if_null(Self, &_Responsible);
+      Atomic::replace_if_null(&_Responsible, Self);
     }
 
     // The lock might have been released while this thread was occupied queueing
@@ -773,7 +773,7 @@
 
   ObjectWaiter * v = _cxq;
   assert(v != NULL, "invariant");
-  if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
+  if (v != SelfNode || Atomic::cmpxchg(&_cxq, v, SelfNode->_next) != v) {
    // The CAS above can fail from interference IFF a "RAT" arrived.
    // In that case Self must be in the interior and can no longer be
    // at the head of cxq.
@@ -959,7 +959,7 @@
   // to reacquire the lock the responsibility for ensuring succession
   // falls to the new owner.
   //
-  if (!Atomic::replace_if_null(THREAD, &_owner)) {
+  if (!Atomic::replace_if_null(&_owner, THREAD)) {
     return;
   }
 
@@ -995,7 +995,7 @@
     // The following loop is tantamount to: w = swap(&cxq, NULL)
     for (;;) {
       assert(w != NULL, "Invariant");
-      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
+      ObjectWaiter * u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
       if (u == w) break;
       w = u;
     }
@@ -1459,7 +1459,7 @@
   for (;;) {
     ObjectWaiter * front = _cxq;
     iterator->_next = front;
-    if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
+    if (Atomic::cmpxchg(&_cxq, front, iterator) == front) {
       break;
     }
   }
@@ -1680,7 +1680,7 @@
 
   Thread * ox = (Thread *) _owner;
   if (ox == NULL) {
-    ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+    ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
     if (ox == NULL) {
       // The CAS succeeded -- this thread acquired ownership
       // Take care of some bookkeeping to exit spin state.
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/os.cpp
--- a/src/hotspot/share/runtime/os.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/os.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -855,7 +855,7 @@
   while (true) {
     unsigned int seed = _rand_seed;
     unsigned int rand = random_helper(seed);
-    if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) {
+    if (Atomic::cmpxchg(&_rand_seed, seed, rand) == seed) {
       return static_cast<int>(rand);
     }
   }
@@ -1804,7 +1804,7 @@
 os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
                                                          os::SuspendResume::State to) {
-  os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
+  os::SuspendResume::State result = Atomic::cmpxchg(&_state, from, to);
   if (result == from) {
     // success
     return to;
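os::random() above is the pure-function variant of the CAS loop: compute the next seed from a snapshot and publish it only if no other thread advanced the seed in the meantime, so each caller takes exactly one step of the sequence. Sketch with a stand-in generator (this LCG is purely illustrative, not HotSpot's random_helper):

    #include <atomic>

    unsigned next_seed(unsigned s) {  // illustrative LCG, not HotSpot's
      return s * 1664525u + 1013904223u;
    }

    unsigned next_random(std::atomic<unsigned>& seed) {
      for (;;) {
        unsigned snapshot = seed.load();
        unsigned rand = next_seed(snapshot);
        if (seed.compare_exchange_strong(snapshot, rand)) {
          return rand;   // we advanced the shared seed exactly once
        }
      }
    }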
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/synchronizer.cpp
--- a/src/hotspot/share/runtime/synchronizer.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/synchronizer.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -240,7 +240,7 @@
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markWord::unused_mark());
 
-    if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
+    if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
       assert(m->_recursions == 0, "invariant");
       return true;
     }
@@ -749,7 +749,7 @@
       hash = get_next_hash(self, obj);  // get a new hash
       temp = mark.copy_set_hash(hash);  // merge the hash into header
       assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
-      uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
+      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
       test = markWord(v);
       if (test != mark) {
         // The attempt to update the ObjectMonitor's header/dmw field
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/thread.cpp
--- a/src/hotspot/share/runtime/thread.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -861,7 +861,7 @@
 bool Thread::claim_par_threads_do(uintx claim_token) {
   uintx token = _threads_do_token;
   if (token != claim_token) {
-    uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token);
+    uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token);
     if (res == token) {
       return true;
     }
@@ -4875,7 +4875,7 @@
 typedef volatile int SpinLockT;
 
 void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
-  if (Atomic::cmpxchg (1, adr, 0) == 0) {
+  if (Atomic::cmpxchg(adr, 0, 1) == 0) {
     return;   // normal fast-path return
   }
 
@@ -4896,7 +4896,7 @@
         SpinPause();
       }
     }
-    if (Atomic::cmpxchg(1, adr, 0) == 0) return;
+    if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
   }
 }
 
@@ -4968,9 +4968,9 @@
 const intptr_t LOCKBIT = 1;
 
 void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
-  intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
+  intptr_t w = Atomic::cmpxchg(Lock, (intptr_t)0, LOCKBIT);
   if (w == 0) return;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
     return;
   }
 
@@ -4982,7 +4982,7 @@
     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
         return;
       }
     }
@@ -4995,7 +4995,7 @@
     for (;;) {
      w = *Lock;
      if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+        if (Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
          Self->OnList = 0;   // hygiene - allows stronger asserts
          return;
        }
@@ -5003,7 +5003,7 @@
      }
      assert(w & LOCKBIT, "invariant");
      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-      if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
+      if (Atomic::cmpxchg(Lock, w, intptr_t(Self)|LOCKBIT) == w) break;
    }
 
    while (Self->OnList != 0) {
@@ -5039,7 +5039,7 @@
 // store (CAS) to the lock-word that releases the lock becomes globally visible.
 void Thread::muxRelease(volatile intptr_t * Lock) {
   for (;;) {
-    const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, LOCKBIT);
+    const intptr_t w = Atomic::cmpxchg(Lock, LOCKBIT, (intptr_t)0);
     assert(w & LOCKBIT, "invariant");
     if (w == LOCKBIT) return;
     ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
@@ -5050,7 +5050,7 @@
 
     // The following CAS() releases the lock and pops the head element.
     // The CAS() also ratifies the previously fetched lock-word value.
-    if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
+    if (Atomic::cmpxchg(Lock, w, intptr_t(nxt)) != w) {
      continue;
    }
    List->OnList = 0;
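The FastHashCode hunk in synchronizer.cpp installs a lazily computed hash with a single CAS and then keeps whichever value actually landed, so racing threads agree on one hash. A simplified stand-in where the whole word is the hash (the real code packs it into the object's mark word):

    #include <atomic>
    #include <cstdint>

    uint32_t get_or_install_hash(std::atomic<uint32_t>& slot, uint32_t fresh) {
      uint32_t expected = 0;              // 0 means "no hash yet"
      if (slot.compare_exchange_strong(expected, fresh)) {
        return fresh;                     // ours won
      }
      return expected;                    // use the hash that beat us
    }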
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/thread.inline.hpp
--- a/src/hotspot/share/runtime/thread.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/thread.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -37,14 +37,14 @@
   do {
     flags = _suspend_flags;
   }
-  while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
+  while (Atomic::cmpxchg(&_suspend_flags, flags, (flags | f)) != flags);
 }
 inline void Thread::clear_suspend_flag(SuspendFlags f) {
   uint32_t flags;
   do {
     flags = _suspend_flags;
   }
-  while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags);
+  while (Atomic::cmpxchg(&_suspend_flags, flags, (flags & ~f)) != flags);
 }
 
 inline void Thread::set_has_async_exception() {
@@ -83,7 +83,7 @@
 }
 
 inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
-  return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
+  return (ThreadsList*)Atomic::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value);
 }
 
 inline ThreadsList* Thread::get_threads_hazard_ptr() {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/threadSMR.cpp
--- a/src/hotspot/share/runtime/threadSMR.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -156,7 +156,7 @@
       // No need to update max value so we're done.
       break;
     }
-    if (Atomic::cmpxchg(new_value, &_deleted_thread_time_max, cur_value) == cur_value) {
+    if (Atomic::cmpxchg(&_deleted_thread_time_max, cur_value, new_value) == cur_value) {
       // Updated max value so we're done. Otherwise try it all again.
       break;
     }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/runtime/threadSMR.inline.hpp
--- a/src/hotspot/share/runtime/threadSMR.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/runtime/threadSMR.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -70,7 +70,7 @@
       // No need to update max value so we're done.
      break;
    }
-    if (Atomic::cmpxchg(new_value, &_tlh_time_max, cur_value) == cur_value) {
+    if (Atomic::cmpxchg(&_tlh_time_max, cur_value, new_value) == cur_value) {
      // Updated max value so we're done. Otherwise try it all again.
      break;
    }
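The two threadSMR hunks keep a running maximum: the CAS is attempted only while the candidate still exceeds the published value, and a failed CAS refreshes the snapshot for the re-check. Self-contained sketch:

    #include <atomic>
    #include <cstdint>

    void update_max(std::atomic<uint64_t>& max_value, uint64_t candidate) {
      uint64_t cur = max_value.load();
      while (candidate > cur) {           // otherwise no update is needed
        if (max_value.compare_exchange_weak(cur, candidate)) {
          break;                          // updated max, done
        }
        // cur was refreshed by the failed CAS; re-check and maybe retry
      }
    }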
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/services/attachListener.hpp
--- a/src/hotspot/share/services/attachListener.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/services/attachListener.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -95,7 +95,7 @@
 
   static AttachListenerState transit_state(AttachListenerState new_state,
                                            AttachListenerState cmp_state) {
-    return Atomic::cmpxchg(new_state, &_state, cmp_state);
+    return Atomic::cmpxchg(&_state, cmp_state, new_state);
   }
 
   static bool is_initialized() {
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/services/mallocSiteTable.cpp
--- a/src/hotspot/share/services/mallocSiteTable.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/services/mallocSiteTable.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -128,7 +128,7 @@
   if (entry == NULL) return NULL;
 
   // swap in the head
-  if (Atomic::replace_if_null(entry, &_table[index])) {
+  if (Atomic::replace_if_null(&_table[index], entry)) {
     return entry->data();
   }
 
@@ -229,7 +229,7 @@
   do {
     val = *_lock;
     target = _MAGIC_ + *_lock;
-  } while (Atomic::cmpxchg(target, _lock, val) != val);
+  } while (Atomic::cmpxchg(_lock, val, target) != val);
 
   // wait for all readers to exit
   while (*_lock != _MAGIC_) {
@@ -243,5 +243,5 @@
 }
 
 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
-  return Atomic::replace_if_null(entry, &_next);
+  return Atomic::replace_if_null(&_next, entry);
 }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/services/memTracker.cpp
--- a/src/hotspot/share/services/memTracker.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/services/memTracker.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -183,7 +183,7 @@
   // printing the final report during normal VM exit, it should not print
   // the final report again. In addition, it should be guarded from
   // recursive calls in case NMT reporting itself crashes.
-  if (Atomic::cmpxchg(true, &g_final_report_did_run, false) == false) {
+  if (Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) {
     NMT_TrackingLevel level = tracking_level();
     if (level >= NMT_summary) {
       report(level == NMT_summary, output);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/accessFlags.cpp
--- a/src/hotspot/share/utilities/accessFlags.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/accessFlags.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -33,7 +33,7 @@
   do {
     old_flags = _flags;
     new_flags = old_flags | bits;
-    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+    f = Atomic::cmpxchg(&_flags, old_flags, new_flags);
   } while(f != old_flags);
 }
 
@@ -43,7 +43,7 @@
   do {
     old_flags = _flags;
     new_flags = old_flags & ~bits;
-    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+    f = Atomic::cmpxchg(&_flags, old_flags, new_flags);
   } while(f != old_flags);
 }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/bitMap.cpp
--- a/src/hotspot/share/utilities/bitMap.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/bitMap.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -217,7 +217,7 @@
   bm_word_t mr = inverted_bit_mask_for_range(beg, end);
   bm_word_t nw = value ? (w | ~mr) : (w & mr);
   while (true) {
-    bm_word_t res = Atomic::cmpxchg(nw, pw, w);
+    bm_word_t res = Atomic::cmpxchg(pw, w, nw);
     if (res == w) break;
     w = res;
     nw = value ? (w | ~mr) : (w & mr);
@@ -640,7 +640,7 @@
     table[i] = num_set_bits(i);
   }
 
-  if (!Atomic::replace_if_null(table, &_pop_count_table)) {
+  if (!Atomic::replace_if_null(&_pop_count_table, table)) {
     guarantee(_pop_count_table != NULL, "invariant");
     FREE_C_HEAP_ARRAY(idx_t, table);
   }
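BitMap's parallel bit operations CAS the whole word containing the bit and report false when another thread already made the change, which the bitMap.inline.hpp hunks below rely on. Self-contained sketch of the set-bit case (illustrative free function, not HotSpot's class):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    bool par_set_bit(std::atomic<uintptr_t>* map, size_t bit) {
      const size_t bits_per_word = 8 * sizeof(uintptr_t);
      std::atomic<uintptr_t>& word = map[bit / bits_per_word];
      const uintptr_t mask = uintptr_t(1) << (bit % bits_per_word);
      uintptr_t old_val = word.load();
      for (;;) {
        uintptr_t new_val = old_val | mask;
        if (new_val == old_val) {
          return false;                 // someone else beat us to it
        }
        if (word.compare_exchange_weak(old_val, new_val)) {
          return true;                  // success
        }
      }
    }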
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/bitMap.inline.hpp
--- a/src/hotspot/share/utilities/bitMap.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/bitMap.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -72,7 +72,7 @@
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
+    const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order);
     if (cur_val == old_val) {
       return true;      // Success.
     }
@@ -91,7 +91,7 @@
     if (new_val == old_val) {
      return false;     // Someone else beat us to it.
    }
-    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
+    const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order);
    if (cur_val == old_val) {
      return true;      // Success.
    }
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/concurrentHashTable.inline.hpp
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -145,7 +145,7 @@
   if (is_locked()) {
     return false;
   }
-  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
+  if (Atomic::cmpxchg(&_first, expect, node) == expect) {
     return true;
   }
   return false;
@@ -160,7 +160,7 @@
   }
   // We will expect a clean first pointer.
   Node* tmp = first();
-  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
+  if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
     return true;
   }
   return false;
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/debug.cpp
--- a/src/hotspot/share/utilities/debug.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/debug.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -323,7 +323,7 @@
   // same time. To avoid dumping the heap or executing the data collection
   // commands multiple times we just do it once when the first threads reports
   // the error.
-  if (Atomic::cmpxchg(1, &out_of_memory_reported, 0) == 0) {
+  if (Atomic::cmpxchg(&out_of_memory_reported, 0, 1) == 0) {
     // create heap dump before OnOutOfMemoryError commands are executed
     if (HeapDumpOnOutOfMemoryError) {
       tty->print_cr("java.lang.OutOfMemoryError: %s", message);
@@ -762,7 +762,7 @@
   // Store Context away.
   if (ucVoid) {
     const intx my_tid = os::current_thread_id();
-    if (Atomic::cmpxchg(my_tid, &g_asserting_thread, (intx)0) == 0) {
+    if (Atomic::cmpxchg(&g_asserting_thread, (intx)0, my_tid) == 0) {
       store_context(ucVoid);
       g_assertion_context = &g_stored_assertion_context;
     }
@@ -772,4 +772,3 @@
   return false;
 }
 #endif // CAN_SHOW_REGISTERS_ON_ASSERT
-
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/lockFreeStack.hpp
--- a/src/hotspot/share/utilities/lockFreeStack.hpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp Mon Nov 25 12:33:15 2019 +0100
@@ -65,7 +65,7 @@
     do {
       old = cur;
       set_next(*last, cur);
-      cur = Atomic::cmpxchg(first, &_top, cur);
+      cur = Atomic::cmpxchg(&_top, cur, first);
     } while (old != cur);
   }
 
@@ -91,7 +91,7 @@
       new_top = next(*result);
     }
     // CAS even on empty pop, for consistent membar bahavior.
-    result = Atomic::cmpxchg(new_top, &_top, result);
+    result = Atomic::cmpxchg(&_top, result, new_top);
   } while (result != old);
   if (result != NULL) {
     set_next(*result, NULL);
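LockFreeStack's push and pop above are the classic Treiber stack; the patch only reorders the cmpxchg arguments. A minimal sketch (deliberately ignoring the ABA hazard that the real class documents as a constraint on concurrent pops):

    #include <atomic>

    struct TNode { TNode* next = nullptr; };

    void stack_push(std::atomic<TNode*>& top, TNode* n) {
      TNode* cur = top.load();
      do {
        n->next = cur;                  // link before publishing
      } while (!top.compare_exchange_weak(cur, n));
    }

    TNode* stack_pop(std::atomic<TNode*>& top) {
      TNode* cur = top.load();
      while (cur != nullptr && !top.compare_exchange_weak(cur, cur->next)) {
        // cur reloaded on failure; retry
      }
      return cur;                       // nullptr if the stack was empty
    }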
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/singleWriterSynchronizer.cpp
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -64,7 +64,7 @@
   do {
     old = value;
     *new_ptr = ++value;
-    value = Atomic::cmpxchg(value, &_enter, old);
+    value = Atomic::cmpxchg(&_enter, old, value);
   } while (old != value);
   // Critical sections entered before we changed the polarity will use
   // the old exit counter.  Critical sections entered after the change
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/vmError.cpp
--- a/src/hotspot/share/utilities/vmError.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/vmError.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -1365,7 +1365,7 @@
   }
   intptr_t mytid = os::current_thread_id();
   if (_first_error_tid == -1 &&
-      Atomic::cmpxchg(mytid, &_first_error_tid, (intptr_t)-1) == -1) {
+      Atomic::cmpxchg(&_first_error_tid, (intptr_t)-1, mytid) == -1) {
 
     // Initialize time stamps to use the same base.
     out.time_stamp().update_to(1);
diff -r 4cbfa5077d68 -r 623722a6aeb9 src/hotspot/share/utilities/waitBarrier_generic.cpp
--- a/src/hotspot/share/utilities/waitBarrier_generic.cpp Mon Nov 25 12:32:40 2019 +0100
+++ b/src/hotspot/share/utilities/waitBarrier_generic.cpp Mon Nov 25 12:33:15 2019 +0100
@@ -48,7 +48,7 @@
     assert(w > 0, "Bad counting");
     // We need an exact count which never goes below zero,
    // otherwise the semaphore may be signalled too many times.
-    if (Atomic::cmpxchg(w - 1, &_waiters, w) == w) {
+    if (Atomic::cmpxchg(&_waiters, w, w - 1) == w) {
      _sem_barrier.signal();
      return w - 1;
    }