/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
162 template<size_t byte_size> |
162 template<size_t byte_size> |
163 struct Atomic::PlatformAdd |
163 struct Atomic::PlatformAdd |
164 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > |
164 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > |
165 { |
165 { |
166 template<typename I, typename D> |
166 template<typename I, typename D> |
167 D add_and_fetch(I add_value, D volatile* dest) const; |
167 D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; |
168 }; |
168 }; |
169 |
169 |
170 template<> |
170 template<> |
171 template<typename I, typename D> |
171 template<typename I, typename D> |
172 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const { |
172 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, |
|
173 atomic_memory_order order) const { |
173 STATIC_ASSERT(4 == sizeof(I)); |
174 STATIC_ASSERT(4 == sizeof(I)); |
174 STATIC_ASSERT(4 == sizeof(D)); |
175 STATIC_ASSERT(4 == sizeof(D)); |
175 |
176 |
176 #ifdef ARM |
177 #ifdef ARM |
177 return add_using_helper<int>(arm_add_and_fetch, add_value, dest); |
178 return add_using_helper<int>(arm_add_and_fetch, add_value, dest); |
184 #endif // ARM |
185 #endif // ARM |
185 } |
186 } |
186 |
187 |
187 template<> |
188 template<> |
188 template<typename I, typename D> |
189 template<typename I, typename D> |
189 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const { |
190 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, |
|
191 atomic_memory_order order) const { |
190 STATIC_ASSERT(8 == sizeof(I)); |
192 STATIC_ASSERT(8 == sizeof(I)); |
191 STATIC_ASSERT(8 == sizeof(D)); |
193 STATIC_ASSERT(8 == sizeof(D)); |
192 |
194 |
193 return __sync_add_and_fetch(dest, add_value); |
195 return __sync_add_and_fetch(dest, add_value); |
194 } |
196 } |
195 |
197 |
196 template<> |
198 template<> |
197 template<typename T> |
199 template<typename T> |
198 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value, |
200 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value, |
199 T volatile* dest) const { |
201 T volatile* dest, |
|
202 atomic_memory_order order) const { |
200 STATIC_ASSERT(4 == sizeof(T)); |
203 STATIC_ASSERT(4 == sizeof(T)); |
201 #ifdef ARM |
204 #ifdef ARM |
202 return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest); |
205 return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest); |
203 #else |
206 #else |
204 #ifdef M68K |
207 #ifdef M68K |
220 } |
223 } |
221 |
224 |
222 template<> |
225 template<> |
223 template<typename T> |
226 template<typename T> |
224 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, |
227 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, |
225 T volatile* dest) const { |
228 T volatile* dest, |
|
229 atomic_memory_order order) const { |
226 STATIC_ASSERT(8 == sizeof(T)); |
230 STATIC_ASSERT(8 == sizeof(T)); |
227 T result = __sync_lock_test_and_set (dest, exchange_value); |
231 T result = __sync_lock_test_and_set (dest, exchange_value); |
228 __sync_synchronize(); |
232 __sync_synchronize(); |
229 return result; |
233 return result; |
230 } |
234 } |
236 template<> |
240 template<> |
237 template<typename T> |
241 template<typename T> |
238 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, |
242 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, |
239 T volatile* dest, |
243 T volatile* dest, |
240 T compare_value, |
244 T compare_value, |
241 cmpxchg_memory_order order) const { |
245 atomic_memory_order order) const { |
242 STATIC_ASSERT(4 == sizeof(T)); |
246 STATIC_ASSERT(4 == sizeof(T)); |
243 #ifdef ARM |
247 #ifdef ARM |
244 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value); |
248 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value); |
245 #else |
249 #else |
246 #ifdef M68K |
250 #ifdef M68K |
254 template<> |
258 template<> |
255 template<typename T> |
259 template<typename T> |
256 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, |
260 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, |
257 T volatile* dest, |
261 T volatile* dest, |
258 T compare_value, |
262 T compare_value, |
259 cmpxchg_memory_order order) const { |
263 atomic_memory_order order) const { |
260 STATIC_ASSERT(8 == sizeof(T)); |
264 STATIC_ASSERT(8 == sizeof(T)); |
261 return __sync_val_compare_and_swap(dest, compare_value, exchange_value); |
265 return __sync_val_compare_and_swap(dest, compare_value, exchange_value); |
262 } |
266 } |
263 |
267 |
264 template<> |
268 template<> |