author:      jrose
date:        Tue, 21 Apr 2009 23:21:04 -0700
changeset:   2570:ecc7862946d4
parent:      2105:347008ce7984
child:       2867:69187054225f
permissions: -rw-r--r--
/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRGenerator.cpp.incl"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
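// The `__` shorthand defined above routes each emission through the current
// LIR_List: `__ move(src, dest);` expands to `gen()->lir()->move(src, dest);`
// (debug builds also pass __FILE__/__LINE__ so every LIR instruction can be
// traced back to the generator code that emitted it).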


void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for two assignments b := c, a := b, start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a, then move c to b
// For a cycle a := b, b := a, start with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}

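// A minimal, self-contained model of the traversal above (illustrative only;
// Node, move and demo below are stand-ins, not HotSpot types). It shows why a
// single temp register suffices: the DFS emits moves in post order, and the
// one back edge that closes a cycle is redirected through the temp.

#include <cstdio>
#include <vector>

namespace phi_resolver_sketch {
  struct Node {
    int reg;                    // virtual register this node represents
    std::vector<Node*> dests;   // registers assigned from this node
    bool visited, start, assigned;
    Node(int r) : reg(r), visited(false), start(false), assigned(false) {}
  };

  static Node* loop_node = NULL;

  static void move(Node* src, Node* dest) {
    if (!dest->visited) {
      dest->visited = true;
      for (size_t i = 0; i < dest->dests.size(); i++) {
        move(dest, dest->dests[i]);
      }
    } else if (!dest->start) {
      loop_node = dest;                       // back edge: cycle detected
      std::printf("temp := r%d\n", src->reg); // break it through the temp
      return;
    }
    if (!dest->assigned) {
      if (loop_node == dest) {
        std::printf("r%d := temp\n", dest->reg);
        dest->assigned = true;
      } else if (src != NULL) {
        std::printf("r%d := r%d\n", dest->reg, src->reg);
        dest->assigned = true;
      }
    }
  }

  // For the cycle r1 := r2, r2 := r1 this prints the three moves from the
  // comment above: temp := r1, r1 := r2, r2 := temp.
  static void demo() {
    Node r1(1), r2(2);
    r2.dests.push_back(&r1);  // r1 := r2
    r1.dests.push_back(&r2);  // r2 := r1
    loop_node = NULL;
    move(NULL, &r2);
    r2.start = true;
  }
}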


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop walking when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substitution");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}

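// The compare-and-branch pairs above use the standard unsigned-comparison
// trick: checking index against length with an unsigned condition
// (above/below) folds the index < 0 and index >= length tests into a single
// check, because a negative index reinterpreted as unsigned is larger than
// any valid array length. A minimal model of the test (illustrative only;
// nio_range_check below applies the same pattern to a Buffer's limit):

#include <cstdint>

namespace range_check_sketch {
  static inline bool out_of_bounds(int32_t index, int32_t length) {
    // one unsigned compare covers both "index < 0" and "index >= length"
    return (uint32_t)index >= (uint32_t)length;
  }
}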


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}

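// The _imul case above strength-reduces a multiply by a power-of-two
// constant into a left shift: for c = 8, exact_log2(8) == 3, so x * 8 is
// emitted as x << 3. A worked model under Java's wraparound integer
// semantics (illustrative only):

#include <cstdint>

namespace strength_reduce_sketch {
  static inline int32_t mul_by_8(int32_t x) {
    // unsigned arithmetic models Java's two's-complement wraparound,
    // so the shift matches x * 8 for every x, including negatives
    return (int32_t)((uint32_t)x << 3);
  }
}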


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
  case Bytecodes::_iand:
  case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

  case Bytecodes::_ior:
  case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

  case Bytecodes::_ixor:
  case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

  default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for the slow path, use debug info for the state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // set up registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for the instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}


void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}


// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of the
// phi nodes and locks the necessary registers and spilling slots.

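// As a concrete illustration: at a merge point whose successor has
// phi0 = [v1, v2] for local 0 and phi1 = [v3, v4] for local 1, the epilog of
// predecessor 1 queues operand(v1) -> operand(phi0) and
// operand(v3) -> operand(phi1), while predecessor 2 queues the corresponding
// moves from v2 and v4; the PhiResolver then orders each batch and breaks any
// cycle through its temp. A swap such as (x, y) = (y, x) carried across the
// block boundary produces exactly the r1/r2 cycle shown in the PhiResolver
// comment earlier in this file.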


// move the current value to the referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using the register in the hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block-local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

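// The lookup above is a linear scan keyed on (type, raw bits): T_INT and
// T_FLOAT compare their 32 bits, T_LONG and T_DOUBLE compare both 32-bit
// halves. Comparing raw bits rather than floating-point values means +0.0
// and -0.0 stay distinct and a repeated NaN constant can still hit the
// cache. A minimal standalone model of the cache (illustrative only):

#include <cstring>
#include <vector>

namespace constant_cache_sketch {
  struct Entry { unsigned long long bits; int reg; };
  static std::vector<Entry> cache;

  // returns the previously allocated register if this exact bit pattern was
  // seen before in the current block, otherwise records next_reg for it
  static int reg_for_double(double v, int next_reg) {
    unsigned long long bits;
    std::memcpy(&bits, &v, sizeof(bits));   // raw bit comparison
    for (size_t i = 0; i < cache.size(); i++) {
      if (cache[i].bits == bits) return cache[i].reg;
    }
    Entry e = { bits, next_reg };
    cache.push_back(e);
    return next_reg;
  }
}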

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
  case BarrierSet::G1SATBCT:
  case BarrierSet::G1SATBCTLogging:
    G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
    break;
#endif // SERIALGC
  case BarrierSet::CardTableModRef:
  case BarrierSet::CardTableExtension:
    // No pre barriers
    break;
  case BarrierSet::ModRef:
  case BarrierSet::Other:
    // No pre barriers
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
  case BarrierSet::G1SATBCT:
  case BarrierSet::G1SATBCTLogging:
    G1SATBCardTableModRef_post_barrier(addr, new_val);
    break;
#endif // SERIALGC
  case BarrierSet::CardTableModRef:
  case BarrierSet::CardTableExtension:
    CardTableModRef_post_barrier(addr, new_val);
    break;
  case BarrierSet::ModRef:
  case BarrierSet::Other:
    // No post barriers
    break;
  default:
    ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LabelObj* start_store = new LabelObj();

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();

  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

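// The XOR-and-shift sequence above implements G1's cross-region filter: two
// pointers fall in the same heap region exactly when their addresses agree in
// all bits above the region-size shift, so (addr ^ new_val) >>
// LogOfHRGrainBytes is zero for same-region stores and the slow-path stub is
// skipped. A minimal model of the test (illustrative only; the shift value is
// a placeholder):

#include <cstdint>

namespace g1_filter_sketch {
  static const int log_region_bytes = 20;   // e.g. 1 MB regions (placeholder)

  static inline bool crosses_region(uintptr_t field_addr, uintptr_t new_val) {
    // zero iff both addresses lie in the same region
    return ((field_addr ^ new_val) >> log_region_bytes) != 0;
  }
}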
1384 |
#endif // SERIALGC |
|
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
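  // Dirty the card: store a zero byte at byte_map_base + (addr >> card_shift),
  // folding the table base into the address as a constant displacement when
  // the platform allows it.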
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
}


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

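  // For oop stores the collector may need a pre-write barrier (e.g. G1's
  // snapshot-at-the-beginning barrier records the value about to be
  // overwritten) before the store itself is emitted.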
  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
#ifdef PRECISE_CARDMARK
    // Precise cardmarks don't work
    post_barrier(LIR_OprFact::address(address), value.result());
#else
    post_barrier(object.result(), value.result());
#endif // PRECISE_CARDMARK
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}


//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}


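// Array element load: the index stays an inlined constant where the platform
// allows it, and when the range check subsumes the null check the separate
// null check is dropped from the load below.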
void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = true;

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      //       constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


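// A throw either unwinds the current frame directly (when no handler in any
// nested scope could catch the exception type) or goes through the exception
// handler lookup path; JVMTI exception posting forces the lookup path.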
void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address());
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, true));
  }

  if (JvmtiExport::can_post_exceptions() &&
      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
         "should be no more handlers to dispatch to");

  if (DTraceMethodProbes &&
      block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // notify that this frame is unwinding
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}


void LIRGenerator::do_RoundFP(RoundFP* x) {
  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}

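// Raw unsafe load: the effective address is base + (index << log2_scale);
// on 32-bit platforms a long base is first narrowed to an int register.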
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  BasicType dst_type = x->basic_type();
  LIR_Opr index_op = idx.result();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#ifdef X86
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      LIR_Opr tmp = new_register(T_INT);
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    __ move(addr, reg);
  }
}


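// Raw unsafe store: mirrors do_UnsafeGetRaw, except that a scaled index is
// always shifted in a temporary register (see the note on Intel below).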
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  LIR_Opr index_op = idx.result();
  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    index_op = new_register(T_INT);
    __ move(idx.result(), index_op);
    __ shift_left(index_op, log2_scale, index_op);
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  __ move(value.result(), addr);
}


void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  LIR_Opr reg = rlock_result(x, x->basic_type());

  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
  if (x->is_volatile() && os::is_MP()) __ membar();
}


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
}


void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  src.load_item();
  if (off.is_constant() && can_inline_as_constant(x->offset())) {
    // let it be a constant
    off.dont_load_item();
  } else {
    off.load_item();
  }

  set_no_result(x);

  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  __ prefetch(addr, is_store);
}


void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}


void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}


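// Emits a compare-and-branch chain over switch ranges: single keys and
// two-key ranges get direct equality tests, wider ranges a two-sided bounds
// test that skips past the range on the low side.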
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}


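// Collapses consecutive table-switch keys that branch to the same successor
// into SwitchRanges, dropping ranges that only dispatch to the default.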
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}


// we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        // still in same range
        range->set_high_key(new_key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}


void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  int lo_key = x->lo_key();
  int hi_key = x->hi_key();
  int len = x->length();
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}


void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}


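// Unconditional goto: frees the OSR buffer when falling into an OSR entry,
// polls for a safepoint (and counts the backedge) if required, and moves
// values into their phi locations before jumping.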
void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, state));

    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // emit phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}


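// The standard method entry: moves incoming arguments from their calling
// convention locations into fresh virtual registers, emits DTrace entry and
// synchronization code, and bumps the invocation counter.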
void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type that is passed.
    switch (t) {
      case T_BYTE:
      case T_BOOLEAN:
      case T_SHORT:
      case T_CHAR:
        t = T_INT;
        break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }

  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }

  // increment invocation counters if needed
  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  int i = x->has_receiver() ? 1 : 0;
  for (; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move(receiver->result(), loc);
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  int idx = x->has_receiver() ? 1 : 0;
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
    idx += (param->type()->is_double_word() ? 2 : 1);
  }
  return argument_items;
}


// The invoke with receiver has the following phases:
//   a) traverse and load/lock receiver;
//   b) traverse all arguments -> item-array (invoke_visit_argument)
//   c) push receiver on stack
//   d) load each of the items and push on stack
//   e) unlock receiver
//   f) move receiver into receiver-register %o0
//   g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier, by loading %o0, it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
void LIRGenerator::do_Invoke(Invoke* x) {
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);

  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;

  // setup result register
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(x->target(), result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(x->target(), receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(x->target(), receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}


// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);

  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
}


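// Central intrinsic dispatch: bit-pattern conversions, time queries, math
// intrinsics, arraycopy, NIO checkIndex and compare-and-swap each route to a
// dedicated helper.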
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble    :
  case vmIntrinsics::_floatToRawIntBits   : {
    do_FPIntrinsics(x);
    break;
  }

  case vmIntrinsics::_currentTimeMillis: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  case vmIntrinsics::_nanoTime: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  case vmIntrinsics::_Object_init:   do_RegisterFinalizer(x); break;
  case vmIntrinsics::_getClass:      do_getClass(x);          break;
  case vmIntrinsics::_currentThread: do_currentThread(x);     break;

  case vmIntrinsics::_dlog:   // fall through
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dabs:   // fall through
  case vmIntrinsics::_dsqrt:  // fall through
  case vmIntrinsics::_dtan:   // fall through
  case vmIntrinsics::_dsin:   // fall through
  case vmIntrinsics::_dcos:   do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy: do_ArrayCopy(x);  break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSwapObject:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSwapInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSwapLong:
    do_CompareAndSwap(x, longType);
    break;

  // sun.misc.AtomicLongCSImpl.attemptUpdate
  case vmIntrinsics::_attemptUpdate:
    do_AttemptUpdate(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}


void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_OBJECT);
  LIR_Opr tmp = new_register(T_INT);
  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
}


void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
  LIRItem mdo(x->mdo(), this);
  mdo.load_item();

  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
}


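// Convenience overloads: build the signature and argument list for a one- or
// two-argument runtime call, then defer to the general forms below.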
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
//    if (!can_store_as_constant(arg)) {
//      LIR_Opr tmp = new_register(arg->type());
//      __ move(arg, tmp);
//      arg = tmp;
//    }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


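// Under TIERED compilation, tier-1 compiled code counts invocations (or
// backedges) and branches to a CounterOverflowStub once the tier-1 limit is
// reached, triggering recompilation at a higher tier.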
void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
    int limit = InvocationCounter::Tier1InvocationLimit;
    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
                          InvocationCounter::counter_offset());
    if (backedge) {
      limit = InvocationCounter::Tier1BackEdgeLimit;
      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
                        InvocationCounter::counter_offset());
    }

    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
    __ branch(lir_cond_aboveEqual, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
#endif
}