@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -30,98 +30,102 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
   __asm__ volatile ( "lock xaddl %0,(%2)"
                     : "=r" (old_value)
                     : "0" (add_value), "r" (dest)
                     : "cc", "memory");
   return old_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "xchgl (%2),%0"
                     : "=r" (exchange_value)
                     : "0" (exchange_value), "r" (dest)
                     : "memory");
   return exchange_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile ( "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
                     : "r" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
 
 #ifdef AMD64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
   __asm__ __volatile__ ( "lock xaddq %0,(%2)"
                         : "=r" (old_value)
                         : "0" (add_value), "r" (dest)
                         : "cc", "memory");
   return old_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
                         : "memory");
   return exchange_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
                         : "cc", "memory");
@@ -139,11 +143,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
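
All of the inline-asm `cmpxchg` specializations share one calling convention: `compare_value` is pinned to `eax`/`rax` via the `"a"` constraint, the instruction writes the value it actually observed in memory back into that register, and the function returns it, so a caller detects success by comparing the result against `compare_value`. A minimal standalone sketch of the 4-byte case, assuming GCC-style inline asm on x86; `cas32` and the demo values are illustrative, not HotSpot code:

#include <cstdint>
#include <cstdio>

// Atomically: if (*dest == compare_value) *dest = exchange_value.
// Returns the value observed at *dest; the swap happened iff the
// return value equals compare_value.
static inline int32_t cas32(int32_t exchange_value,
                            volatile int32_t* dest,
                            int32_t compare_value) {
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)   // eax receives the observed value
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

int main() {
  volatile int32_t v = 41;
  int32_t seen = cas32(42, &v, 41);             // succeeds: v becomes 42
  printf("seen=%d v=%d\n", (int)seen, (int)v);  // seen=41 v=42
  seen = cas32(7, &v, 41);                      // fails: v stays 42
  printf("seen=%d v=%d\n", (int)seen, (int)v);  // seen=42 v=42
  return 0;
}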