diff -r 28eaf7a99a8c -r ea0a16ba6ac0 src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Mon May 07 18:23:59 2018 +0800
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Mon May 07 12:28:11 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
@@ -59,7 +60,8 @@
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
@@ -82,7 +84,8 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv = exchange_value;
   __asm__ volatile(
@@ -96,7 +99,8 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv = exchange_value;
   __asm__ volatile(
@@ -123,7 +127,7 @@
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv;
   __asm__ volatile(
@@ -139,7 +143,7 @@
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv;
   __asm__ volatile(