/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
24 |
|
25 #ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP |
|
26 #define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP |
|
27 |
|
28 #include "metaprogramming/conditional.hpp" |
|
29 #include "metaprogramming/enableIf.hpp" |
|
30 #include "metaprogramming/integralConstant.hpp" |
|
31 #include "utilities/debug.hpp" |
|
32 #include "utilities/globalDefinitions.hpp" |
|
33 |
|
34 // This metafunction returns either oop or narrowOop depending on whether |
|
35 // an access needs to use compressed oops or not. |
|
36 template <DecoratorSet decorators> |
|
37 struct HeapOopType: AllStatic { |
|
38 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value && |
|
39 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value; |
|
40 typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type; |
|
41 }; |
|
42 |
|
43 namespace AccessInternal { |
|
44 enum BarrierType { |
|
45 BARRIER_STORE, |
|
46 BARRIER_STORE_AT, |
|
47 BARRIER_LOAD, |
|
48 BARRIER_LOAD_AT, |
|
49 BARRIER_ATOMIC_CMPXCHG, |
|
50 BARRIER_ATOMIC_CMPXCHG_AT, |
|
51 BARRIER_ATOMIC_XCHG, |
|
52 BARRIER_ATOMIC_XCHG_AT, |
|
53 BARRIER_ARRAYCOPY, |
|
54 BARRIER_CLONE |
|
55 }; |
|
56 |
|
57 template <DecoratorSet decorators> |
|
58 struct MustConvertCompressedOop: public IntegralConstant<bool, |
|
59 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value && |
|
60 HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value && |
|
61 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> {}; |
|
62 |
|
63 // This metafunction returns an appropriate oop type if the value is oop-like |
|
64 // and otherwise returns the same type T. |
|
65 template <DecoratorSet decorators, typename T> |
|
66 struct EncodedType: AllStatic { |
|
67 typedef typename Conditional< |
|
68 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value, |
|
69 typename HeapOopType<decorators>::type, T>::type type; |
|
70 }; |
|
71 |
|
72 template <DecoratorSet decorators> |
|
73 inline typename HeapOopType<decorators>::type* |
|
74 oop_field_addr(oop base, ptrdiff_t byte_offset) { |
|
75 return reinterpret_cast<typename HeapOopType<decorators>::type*>( |
|
76 reinterpret_cast<intptr_t>((void*)base) + byte_offset); |
|
77 } |
|
78 |
|
79 // This metafunction returns whether it is possible for a type T to require |
|
80 // locking to support wide atomics or not. |
|
81 template <typename T> |
|
82 #ifdef SUPPORTS_NATIVE_CX8 |
|
83 struct PossiblyLockedAccess: public IntegralConstant<bool, false> {}; |
|
84 #else |
|
85 struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)>::value> {}; |
|
86 #endif |
|
87 |
|
88 template <DecoratorSet decorators, typename T> |
|
89 struct AccessFunctionTypes { |
|
90 typedef T (*load_at_func_t)(oop base, ptrdiff_t offset); |
|
91 typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value); |
|
92 typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value); |
|
93 typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset); |
|
94 |
|
95 typedef T (*load_func_t)(void* addr); |
|
96 typedef void (*store_func_t)(void* addr, T value); |
|
97 typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value); |
|
98 typedef T (*atomic_xchg_func_t)(T new_value, void* addr); |
|
99 |
|
100 typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length); |
|
101 typedef void (*clone_func_t)(oop src, oop dst, size_t size); |
|
102 }; |
|
103 |
|
104 template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {}; |
|
105 |
|
106 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \ |
|
107 template <DecoratorSet decorators, typename T> \ |
|
108 struct AccessFunction<decorators, T, bt>: AllStatic{ \ |
|
109 typedef typename AccessFunctionTypes<decorators, T>::func type; \ |
|
110 } |
|
111 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t); |
|
112 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t); |
|
113 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t); |
|
114 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t); |
|
115 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t); |
|
116 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t); |
|
117 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t); |
|
118 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t); |
|
119 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t); |
|
120 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t); |
|
121 #undef ACCESS_GENERATE_ACCESS_FUNCTION |
|
122 |
|
123 template <DecoratorSet decorators, typename T, BarrierType barrier_type> |
|
124 typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier(); |
|
125 |
|
126 template <DecoratorSet decorators, typename T, BarrierType barrier_type> |
|
127 typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier(); |
|
128 |
|
129 class AccessLocker VALUE_OBJ_CLASS_SPEC { |
|
130 public: |
|
131 AccessLocker(); |
|
132 ~AccessLocker(); |
|
133 }; |
|
134 bool wide_atomic_needs_locking(); |
|
135 |
|
136 void* field_addr(oop base, ptrdiff_t offset); |
|
137 |
|
138 // Forward calls to Copy:: in the cpp file to reduce dependencies and allow |
|
139 // faster build times, given how frequently included access is. |
|
140 void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length); |
|
141 void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length); |
|
142 void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length); |
|
143 |
|
144 void arraycopy_disjoint_words(void* src, void* dst, size_t length); |
|
145 void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length); |
|
146 |
|
147 template<typename T> |
|
148 void arraycopy_conjoint(T* src, T* dst, size_t length); |
|
149 template<typename T> |
|
150 void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length); |
|
151 template<typename T> |
|
152 void arraycopy_conjoint_atomic(T* src, T* dst, size_t length); |
|
153 } |
|
154 |
|
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | OOP_DECORATOR_MASK;
|
159 |
|
// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
//
// The *_internal overloads are selected at compile time via EnableIf on the
// memory-ordering decorator; declarations without bodies here are defined
// out of line (presumably in the companion .inline.hpp — the SFINAE
// signatures must match those definitions exactly).
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // Identity encoding when no oop compression is required.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Encode a value into its in-heap representation (narrowOop for
  // compressed oop accesses, otherwise unchanged).
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // Identity decoding when no oop compression is required.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  // Decode a value from its in-heap representation.
  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // Loads, one overload per memory-ordering decorator. SEQ_CST, ACQUIRE
  // and RELAXED are defined out of line; VOLATILE and UNORDERED inline.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // Volatile load: compiler may not elide or reorder it, but no hardware
  // ordering is implied.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  // Unordered load: a plain memory read.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const T*>(addr);
  }

  // Stores, one overload per memory-ordering decorator; same scheme as loads.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // Volatile store; the cast-and-discard silences "expression result
  // unused" style warnings about the volatile assignment's value.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  // Unordered store: a plain memory write.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // Atomic compare-exchange / exchange primitives; defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  // Fast path: T never needs locking, dispatch straight to the atomic op.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  // Slow path for possibly-locked (wide) accesses; defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);

public:
  // Public raw-address entry points: select the ordering from `decorators`.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(T* src, T* dst, size_t length);

  // oop variants of the accessors; they additionally encode/decode the
  // value through the heap representation. Defined out of line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  // (base, offset) conveniences: compute the field address then delegate
  // to the raw-address entry points above.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length);

  static void clone(oop src, oop dst, size_t size);
};
|
382 |
|
383 #endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP |