diff -r 28eaf7a99a8c -r ea0a16ba6ac0 src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon May 07 18:23:59 2018 +0800
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Mon May 07 12:28:11 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
@@ -51,7 +52,8 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (exchange_value)
@@ -65,7 +67,7 @@
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
@@ -79,7 +81,7 @@
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
@@ -91,7 +93,8 @@
 #ifdef AMD64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
@@ -105,7 +108,8 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
@@ -119,7 +123,7 @@
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
@@ -141,7 +145,7 @@
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
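
Not part of the patch above: a minimal, self-contained sketch (GCC/Clang on x86/x86_64 only) of the pattern the hunks change. Each platform primitive now takes a trailing memory-order argument, which the x86 implementations can accept and ignore because xchg and lock-prefixed cmpxchg already act as full fences. The enum and function names here (atomic_memory_order_sketch, xchg_4, cmpxchg_4) are illustrative stand-ins, not HotSpot identifiers; the inline assembly mirrors the 4-byte hunks above.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for HotSpot's atomic_memory_order enum.
enum atomic_memory_order_sketch {
  memory_order_relaxed_sketch,
  memory_order_conservative_sketch
};

// 4-byte exchange, same shape as the PlatformXchg<4> hunk: the order
// argument is accepted but unused, since xchg is implicitly locked.
inline int32_t xchg_4(int32_t exchange_value, int32_t volatile* dest,
                      atomic_memory_order_sketch /* order */) {
  __asm__ volatile ("xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;  // previous value stored at *dest
}

// 4-byte compare-and-exchange, same shape as the PlatformCmpxchg<4> hunk.
inline int32_t cmpxchg_4(int32_t exchange_value, int32_t volatile* dest,
                         int32_t compare_value,
                         atomic_memory_order_sketch /* order */) {
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;  // previous value stored at *dest
}

int main() {
  volatile int32_t v = 1;
  int32_t prev = cmpxchg_4(2, &v, 1, memory_order_conservative_sketch);
  std::printf("prev=%d v=%d\n", prev, (int32_t)v);  // prev=1 v=2
  int32_t old = xchg_4(5, &v, memory_order_relaxed_sketch);
  std::printf("old=%d v=%d\n", old, (int32_t)v);    // old=2 v=5
  return 0;
}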