/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

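// Compute the LIR address for an access: an array-element address for array
// accesses, a PATCHED_ADDR placeholder when the offset still needs patching,
// and a plain base + offset address otherwise. If resolve_in_register is set,
// the address is additionally materialized into a pointer register with leal.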
LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  DecoratorSet decorators = access.decorators();
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();
  LIRGenerator *gen = access.gen();

  LIR_Opr addr_opr;
  if (is_array) {
    addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
  } else if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
  } else {
    addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    __ leal(addr_opr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
    return resolved_addr;
  } else {
    return addr_opr;
  }
}

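// Entry point for heap stores: resolve the access address, record it on the
// access, and hand off to store_at_resolved.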
void BarrierSetC1::store_at(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  store_at_resolved(access, value);
}

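// Entry point for heap loads: resolve the access address, record it on the
// access, and hand off to load_at_resolved.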
void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  load_at_resolved(access, result);
}

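// The atomic accesses below force the resolved address into a register
// (resolve_in_register == true) before delegating to the corresponding
// *_at_resolved implementation.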
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_xchg_at_resolved(access, value);
}

LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_add_at_resolved(access, value);
}

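// Emit the store itself: mask boolean values if requested, and surround
// volatile (MO_SEQ_CST) stores with the required memory barriers.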
void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  LIRGenerator* gen = access.gen();

  if (mask_boolean) {
    value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
  } else {
    __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

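// Emit the load itself: apply the memory barriers required for volatile
// accesses and normalize boolean results when C1_MASK_BOOLEAN is set.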
void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  LIRGenerator *gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
  } else {
    __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }

  /* Normalize the boolean value returned by an unsafe operation, i.e. value != 0 ? value = true : value = false. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

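// The default resolved atomic accesses simply forward to the LIRGenerator.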
LIR_Opr BarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
}

LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_add(access.type(), access.resolved_addr(), value);
}

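// Emit the runtime checks guarding the pre-barrier for reads of
// Reference.referent; individual checks are elided when the offset, the
// source object, or its type can be decided statically.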
void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  LIRGenerator *gen = access.gen();

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

  // We don't need to generate stub if the source object is an array
  if (gen_pre_barrier && base.type()->is_array()) {
    gen_pre_barrier = false;
  }

  if (gen_pre_barrier) {
    // We still need to continue with the checks.
    if (base.is_constant()) {
      ciObject* src_con = base.get_jobject_constant();
      guarantee(src_con != NULL, "no source constant");

      if (src_con->is_null_object()) {
        // The constant src object is null - We can skip
        // generating the code stub.
        gen_pre_barrier = false;
      } else {
        // Non-null constant source object. We still have to generate
        // the slow stub - but we don't need to generate the runtime
        // null object check.
        gen_source_check = false;
      }
    }
  }
  if (gen_pre_barrier && !PatchALot) {
    // Can the klass of object be statically determined to be
    // a sub-class of Reference?
    ciType* type = base.value()->declared_type();
    if ((type != NULL) && type->is_loaded()) {
      if (type->is_subtype_of(gen->compilation()->env()->Reference_klass())) {
        gen_type_check = false;
      } else if (type->is_klass() &&
                 !gen->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
        // Not Reference and not Object klass.
        gen_pre_barrier = false;
      }
    }
  }

  if (gen_pre_barrier) {
    // We can generate the remaining runtime checks here. Let's start with
    // the offset check.
    if (gen_offset_check) {
      // if (offset != referent_offset) -> continue
      // If offset is an int then we can do the comparison with the
      // referent_offset constant; otherwise we need to move
      // referent_offset into a temporary register and generate
      // a reg-reg compare.

      LIR_Opr referent_off;

      if (offset->type() == T_INT) {
        referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
      } else {
        assert(offset->type() == T_LONG, "what else?");
        referent_off = gen->new_register(T_LONG);
        __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
      }
      __ cmp(lir_cond_notEqual, offset, referent_off);
      __ branch(lir_cond_notEqual, offset->type(), cont->label());
    }
    if (gen_source_check) {
      // offset is a const and equals referent offset
      // if (source == null) -> continue
      __ cmp(lir_cond_equal, base.result(), LIR_OprFact::oopConst(NULL));
      __ branch(lir_cond_equal, T_OBJECT, cont->label());
    }
    LIR_Opr src_klass = gen->new_register(T_OBJECT);
    if (gen_type_check) {
      // We have determined that offset == referent_offset && src != null.
      // if (src->_klass->_reference_type == REF_NONE) -> continue
      __ move(new LIR_Address(base.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
      LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
      LIR_Opr reference_type = gen->new_register(T_INT);
      __ move(reference_type_addr, reference_type);
      __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
      __ branch(lir_cond_equal, T_INT, cont->label());
    }
  }
}