author      jrose
date        Tue, 21 Apr 2009 23:21:04 -0700
changeset   2570  ecc7862946d4
parent      2566  865943584ecc
child       3261  c7d5aae8d3f7
permissions -rw-r--r--
/*
 * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_c1_LinearScan.cpp.incl"


#ifndef PRODUCT

static LinearScanStatistic _stat_before_alloc;
static LinearScanStatistic _stat_after_asign;
static LinearScanStatistic _stat_final;

static LinearScanTimers _total_timer;

// helper macro for short definition of timer
#define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);

// helper macro for short definition of trace-output inside code
#define TRACE_LINEAR_SCAN(level, code)       \
  if (TraceLinearScanLevel >= level) {       \
    code;                                    \
  }

#else

#define TIME_LINEAR_SCAN(timer_name)
#define TRACE_LINEAR_SCAN(level, code)

#endif
// Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
#ifdef _LP64
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1};
#else
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
#endif
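
// A rough reading of the table above, assuming the usual BasicType numbering
// (T_BOOLEAN == 4 is the first concrete type, T_CONFLICT the last index):
//
//   type2spill_size[T_INT]    == 1     // one 32-bit word
//   type2spill_size[T_LONG]   == 2     // two words on all platforms
//   type2spill_size[T_DOUBLE] == 2     // two words on all platforms
//   type2spill_size[T_OBJECT] == 2 on _LP64, 1 on 32-bit platforms
//
// so only the pointer-sized entries (T_OBJECT, T_ARRAY) differ between the
// two variants; -1 presumably marks entries that must never be spilled.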
|

// Implementation of LinearScan

LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
 : _compilation(ir->compilation())
 , _ir(ir)
 , _gen(gen)
 , _frame_map(frame_map)
 , _num_virtual_regs(gen->max_virtual_register_number())
 , _has_fpu_registers(false)
 , _num_calls(-1)
 , _max_spills(0)
 , _unused_spill_slot(-1)
 , _intervals(0)   // initialized later with correct length
 , _new_intervals_from_allocation(new IntervalList())
 , _sorted_intervals(NULL)
 , _lir_ops(0)     // initialized later with correct length
 , _block_of_op(0) // initialized later with correct length
 , _has_info(0)
 , _has_call(0)
 , _scope_value_cache(0)   // initialized later with correct length
 , _interval_in_loop(0, 0) // initialized later with correct length
 , _cached_blocks(*ir->linear_scan_order())
#ifdef X86
 , _fpu_stack_allocator(NULL)
#endif
{
  // note: to use more than one instance of LinearScan at a time this function call has to
  //       be moved somewhere outside of this constructor:
  Interval::initialize();

  assert(this->ir() != NULL,          "check if valid");
  assert(this->compilation() != NULL, "check if valid");
  assert(this->gen() != NULL,         "check if valid");
  assert(this->frame_map() != NULL,   "check if valid");
}
|

// ********** functions for converting LIR-Operands to register numbers
//
// Emulate a flat register file comprising physical integer registers,
// physical floating-point registers and virtual registers, in that order.
// Virtual registers already have appropriate numbers, since V0 is
// the number of physical registers.
// Returns -1 for hi word if opr is a single word operand.
//
// Note: the inverse operation (calculating an operand for register numbers)
//       is done in calc_operand_for_interval()

int LinearScan::reg_num(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    assert(opr->vreg_number() >= nof_regs, "found a virtual register with a fixed-register number");
    return opr->vreg_number();
  } else if (opr->is_single_cpu()) {
    return opr->cpu_regnr();
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrLo();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return opr->fpu_regnr() + pd_first_xmm_reg;
  } else if (opr->is_double_xmm()) {
    return opr->fpu_regnrLo() + pd_first_xmm_reg;
#endif
  } else if (opr->is_single_fpu()) {
    return opr->fpu_regnr() + pd_first_fpu_reg;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrLo() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}

int LinearScan::reg_numHi(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    return -1;
  } else if (opr->is_single_cpu()) {
    return -1;
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrHi();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return -1;
  } else if (opr->is_double_xmm()) {
    return -1;
#endif
  } else if (opr->is_single_fpu()) {
    return -1;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrHi() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}
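
// For illustration only (the concrete counts are platform constants): with,
// say, 8 CPU registers and 8 FPU registers, pd_first_fpu_reg would be 8 and
// nof_regs would be 16, giving the flat numbering
//
//   0 .. 7    physical CPU registers
//   8 .. 15   physical FPU registers   (fpu_regnr() + pd_first_fpu_reg)
//   16 ..     virtual registers        (vreg_number(), starting at vreg_base)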
|

// ********** functions for classification of intervals

bool LinearScan::is_precolored_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base;
}

bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_cpu_regs;
}

bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
}

bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
  return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
}

bool LinearScan::is_in_fpu_register(const Interval* i) {
  // fixed intervals not needed for FPU stack allocation
  return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
}

bool LinearScan::is_oop_interval(const Interval* i) {
  // fixed intervals never contain oops
  return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
}
|

// ********** General helper functions

// compute next unused stack index that can be used for spilling
int LinearScan::allocate_spill_slot(bool double_word) {
  int spill_slot;
  if (double_word) {
    if ((_max_spills & 1) == 1) {
      // alignment of double-word values
      // the hole because of the alignment is filled with the next single-word value
      assert(_unused_spill_slot == -1, "wasting a spill slot");
      _unused_spill_slot = _max_spills;
      _max_spills++;
    }
    spill_slot = _max_spills;
    _max_spills += 2;

  } else if (_unused_spill_slot != -1) {
    // re-use hole that was the result of a previous double-word alignment
    spill_slot = _unused_spill_slot;
    _unused_spill_slot = -1;

  } else {
    spill_slot = _max_spills;
    _max_spills++;
  }

  int result = spill_slot + LinearScan::nof_regs + frame_map()->argcount();

  // the class OopMapValue uses only 11 bits for storing the name of the
  // oop location. So a stack slot bigger than 2^11 leads to an overflow
  // that is not reported in product builds. Prevent this by checking the
  // spill slot here (although this value and the later used location name
  // are slightly different)
  if (result > 2000) {
    bailout("too many stack slots used");
  }

  return result;
}
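
// Worked example of the alignment logic above: with _max_spills == 3, a
// double-word request first pads the odd slot 3 (remembered in
// _unused_spill_slot), hands out the aligned pair 4/5 and leaves
// _max_spills == 6; a later single-word request then fills the hole by
// returning slot 3 instead of growing the frame.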
|
void LinearScan::assign_spill_slot(Interval* it) {
  // assign the canonical spill slot of the parent (if a part of the interval
  // is already spilled) or allocate a new spill slot
  if (it->canonical_spill_slot() >= 0) {
    it->assign_reg(it->canonical_spill_slot());
  } else {
    int spill = allocate_spill_slot(type2spill_size[it->type()] == 2);
    it->set_canonical_spill_slot(spill);
    it->assign_reg(spill);
  }
}

void LinearScan::propagate_spill_slots() {
  if (!frame_map()->finalize_frame(max_spills())) {
    bailout("frame too large");
  }
}

// create a new interval with a predefined reg_num
// (only used for parent intervals that are created during the building phase)
Interval* LinearScan::create_interval(int reg_num) {
  assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");

  Interval* interval = new Interval(reg_num);
  _intervals.at_put(reg_num, interval);

  // assign register number for precolored intervals
  if (reg_num < LIR_OprDesc::vreg_base) {
    interval->assign_reg(reg_num);
  }
  return interval;
}
|
// assign a new reg_num to the interval and append it to the list of intervals
// (only used for child intervals that are created during register allocation)
void LinearScan::append_interval(Interval* it) {
  it->set_reg_num(_intervals.length());
  _intervals.append(it);
  _new_intervals_from_allocation->append(it);
}

// copy the vreg-flags if an interval is split
void LinearScan::copy_register_flags(Interval* from, Interval* to) {
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::byte_reg)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::byte_reg);
  }
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::callee_saved)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::callee_saved);
  }

  // Note: do not copy the must_start_in_memory flag because it is not necessary for child
  //       intervals (only the very beginning of the interval must be in memory)
}
|

// ********** spill move optimization
// eliminate moves from register to stack if stack slot is known to be correct

// called during building of intervals
void LinearScan::change_spill_definition_pos(Interval* interval, int def_pos) {
  assert(interval->is_split_parent(), "can only be called for split parents");

  switch (interval->spill_state()) {
    case noDefinitionFound:
      assert(interval->spill_definition_pos() == -1, "must not be set before");
      interval->set_spill_definition_pos(def_pos);
      interval->set_spill_state(oneDefinitionFound);
      break;

    case oneDefinitionFound:
      assert(def_pos <= interval->spill_definition_pos(), "positions are processed in reverse order when intervals are created");
      if (def_pos < interval->spill_definition_pos() - 2) {
        // second definition found, so no spill optimization possible for this interval
        interval->set_spill_state(noOptimization);
      } else {
        // two consecutive definitions (because of two-operand LIR form)
        assert(block_of_op_with_id(def_pos) == block_of_op_with_id(interval->spill_definition_pos()), "block must be equal");
      }
      break;

    case noOptimization:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}
|
// called during register allocation
void LinearScan::change_spill_state(Interval* interval, int spill_pos) {
  switch (interval->spill_state()) {
    case oneDefinitionFound: {
      int def_loop_depth = block_of_op_with_id(interval->spill_definition_pos())->loop_depth();
      int spill_loop_depth = block_of_op_with_id(spill_pos)->loop_depth();

      if (def_loop_depth < spill_loop_depth) {
        // the loop depth of the spilling position is higher than the loop depth
        // at the definition of the interval -> move write to memory out of loop
        // by storing at definition of the interval
        interval->set_spill_state(storeAtDefinition);
      } else {
        // the interval is currently spilled only once, so for now there is no
        // reason to store the interval at the definition
        interval->set_spill_state(oneMoveInserted);
      }
      break;
    }

    case oneMoveInserted: {
      // the interval is spilled more than once, so it is better to store it to
      // memory at the definition
      interval->set_spill_state(storeAtDefinition);
      break;
    }

    case storeAtDefinition:
    case startInMemory:
    case noOptimization:
    case noDefinitionFound:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}
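
// One way to summarize the state machine implemented by the two functions
// above (transitions not shown are no-ops):
//
//   noDefinitionFound  --first def-->                 oneDefinitionFound
//   oneDefinitionFound --second, non-adjacent def-->  noOptimization
//   oneDefinitionFound --spill at same loop depth-->  oneMoveInserted
//   oneDefinitionFound --spill in deeper loop-->      storeAtDefinition
//   oneMoveInserted    --second spill-->              storeAtDefinition
//
// Intervals that end up in storeAtDefinition are stored to their canonical
// spill slot right after their definition by eliminate_spill_moves() below,
// which can then delete the individual register-to-stack moves.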
|
bool LinearScan::must_store_at_definition(const Interval* i) {
  return i->is_split_parent() && i->spill_state() == storeAtDefinition;
}
|
// called once before assignment of register numbers
void LinearScan::eliminate_spill_moves() {
  TIME_LINEAR_SCAN(timer_eliminate_spill_moves);
  TRACE_LINEAR_SCAN(3, tty->print_cr("***** Eliminating unnecessary spill moves"));

  // collect all intervals that must be stored after their definition.
  // the list is sorted by Interval::spill_definition_pos
  Interval* interval;
  Interval* temp_list;
  create_unhandled_lists(&interval, &temp_list, must_store_at_definition, NULL);

#ifdef ASSERT
  Interval* prev = NULL;
  Interval* temp = interval;
  while (temp != Interval::end()) {
    assert(temp->spill_definition_pos() > 0, "invalid spill definition pos");
    if (prev != NULL) {
      assert(temp->from() >= prev->from(), "intervals not sorted");
      assert(temp->spill_definition_pos() >= prev->spill_definition_pos(), "when intervals are sorted by from, then they must also be sorted by spill_definition_pos");
    }

    assert(temp->canonical_spill_slot() >= LinearScan::nof_regs, "interval has no spill slot assigned");
    assert(temp->spill_definition_pos() >= temp->from(), "invalid order");
    assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");

    TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));

    temp = temp->next();
  }
#endif

  LIR_InsertionBuffer insertion_buffer;
  int num_blocks = block_count();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();
    int num_inst = instructions->length();
    bool has_new = false;

    // iterate all instructions of the block. skip the first because it is always a label
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      if (op_id == -1) {
        // remove move from register to stack if the stack slot is guaranteed to be correct.
        // only moves that have been inserted by LinearScan can be removed.
        assert(op->code() == lir_move, "only moves can have an op_id of -1");
        assert(op->as_Op1() != NULL, "move must be LIR_Op1");
        assert(op->as_Op1()->result_opr()->is_virtual(), "LinearScan inserts only moves to virtual registers");

        LIR_Op1* op1 = (LIR_Op1*)op;
        Interval* interval = interval_at(op1->result_opr()->vreg_number());

        if (interval->assigned_reg() >= LinearScan::nof_regs && interval->always_in_memory()) {
          // move target is a stack slot that is always correct, so eliminate instruction
          TRACE_LINEAR_SCAN(4, tty->print_cr("eliminating move from interval %d to %d", op1->in_opr()->vreg_number(), op1->result_opr()->vreg_number()));
          instructions->at_put(j, NULL); // NULL-instructions are deleted by assign_reg_num
        }

      } else {
        // insert move from register to stack just after the beginning of the interval
        assert(interval == Interval::end() || interval->spill_definition_pos() >= op_id, "invalid order");
        assert(interval == Interval::end() || (interval->is_split_parent() && interval->spill_state() == storeAtDefinition), "invalid interval");

        while (interval != Interval::end() && interval->spill_definition_pos() == op_id) {
          if (!has_new) {
            // prepare insertion buffer (appended when all instructions of the block are processed)
            insertion_buffer.init(block->lir());
            has_new = true;
          }

          LIR_Opr from_opr = operand_for_interval(interval);
          LIR_Opr to_opr = canonical_spill_opr(interval);
          assert(from_opr->is_fixed_cpu() || from_opr->is_fixed_fpu(), "from operand must be a register");
          assert(to_opr->is_stack(), "to operand must be a stack slot");

          insertion_buffer.move(j, from_opr, to_opr);
          TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));

          interval = interval->next();
        }
      }
    } // end of instruction iteration

    if (has_new) {
      block->lir()->append(&insertion_buffer);
    }
  } // end of block iteration

  assert(interval == Interval::end(), "missed an interval");
}
|

// ********** Phase 1: number all instructions in all blocks
// Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.

void LinearScan::number_instructions() {
  {
    // dummy-timer to measure the cost of the timer itself
    // (this time is then subtracted from all other timers to get the real value)
    TIME_LINEAR_SCAN(timer_do_nothing);
  }
  TIME_LINEAR_SCAN(timer_number_instructions);

  // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
  int num_blocks = block_count();
  int num_instructions = 0;
  int i;
  for (i = 0; i < num_blocks; i++) {
    num_instructions += block_at(i)->lir()->instructions_list()->length();
  }

  // initialize with correct length
  _lir_ops = LIR_OpArray(num_instructions);
  _block_of_op = BlockBeginArray(num_instructions);

  int op_id = 0;
  int idx = 0;

  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    block->set_first_lir_instruction_id(op_id);
    LIR_OpList* instructions = block->lir()->instructions_list();

    int num_inst = instructions->length();
    for (int j = 0; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      op->set_id(op_id);

      _lir_ops.at_put(idx, op);
      _block_of_op.at_put(idx, block);
      assert(lir_op_with_id(op_id) == op, "must match");

      idx++;
      op_id += 2; // numbering of lir_ops by two
    }
    block->set_last_lir_instruction_id(op_id - 2);
  }
  assert(idx == num_instructions, "must match");
  assert(idx * 2 == op_id, "must match");

  _has_call = BitMap(num_instructions); _has_call.clear();
  _has_info = BitMap(num_instructions); _has_info.clear();
}
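
// Note the even/odd convention established here: every LIR_Op gets an even
// op_id, so an odd number can unambiguously denote a position between two
// operations (build_intervals() uses block_to + 1 for its loopEndMarker use
// positions, for example). It also makes (op_id >> 1) a dense index; this is
// how _has_call and _has_info are addressed in compute_local_live_sets().
//
//   op_id:  0    2    4    6    ...   (instructions)
//   idx:    0    1    2    3    ...   (op_id >> 1)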
|

// ********** Phase 2: compute local live sets separately for each block
// (sets live_gen and live_kill for each block)

void LinearScan::set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill) {
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  // check some assumptions about debug information
  assert(!value->type()->is_illegal(), "if this local is used by the interpreter it shouldn't be of indeterminate type");
  assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands");
  assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    int reg = opr->vreg_number();
    if (!live_kill.at(reg)) {
      live_gen.set_bit(reg);
      TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for value %c%d, LIR op_id %d, register number %d", value->type()->tchar(), value->id(), op->id(), reg));
    }
  }
}
|

void LinearScan::compute_local_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_local_live_sets);

  int  num_blocks = block_count();
  int  live_size = live_set_size();
  bool local_has_fpu_registers = false;
  int  local_num_calls = 0;
  LIR_OpVisitState visitor;

  BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());
  local_interval_in_loop.clear();

  // iterate all blocks
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);

    BitMap live_gen(live_size);  live_gen.clear();
    BitMap live_kill(live_size); live_kill.clear();

    if (block->is_set(BlockBegin::exception_entry_flag)) {
      // Phi functions at the beginning of an exception handler are
      // implicitly defined (= killed) at the beginning of the block.
      for_each_phi_fun(block, phi,
        live_kill.set_bit(phi->operand()->vreg_number())
      );
    }

    LIR_OpList* instructions = block->lir()->instructions_list();
    int num_inst = instructions->length();

    // iterate all instructions of the block. skip the first because it is always a label
    assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);

      // visit operation to collect all operands
      visitor.visit(op);

      if (visitor.has_call()) {
        _has_call.set_bit(op->id() >> 1);
        local_num_calls++;
      }
      if (visitor.info_count() > 0) {
        _has_info.set_bit(op->id() >> 1);
      }

      // iterate input operands of instruction
      int k, n, reg;
      n = visitor.opr_count(LIR_OpVisitState::inputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          if (!live_kill.at(reg)) {
            live_gen.set_bit(reg);
            TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for register %d at instruction %d", reg, op->id()));
          }
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets.
        // this is checked by these assertions to be sure about it.
        // the entry block may have incoming values in registers, which is ok.
        if (!opr->is_virtual_register() && block != ir()->start()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
        }
#endif
      }

      // Add uses of live locals from interpreter's point of view for proper debug information generation
      n = visitor.info_count();
      for (k = 0; k < n; k++) {
        CodeEmitInfo* info = visitor.info_at(k);
        ValueStack* stack = info->stack();
        for_each_state_value(stack, value,
          set_live_gen_kill(value, op, live_gen, live_kill)
        );
      }

      // iterate temp operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::tempMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }

      // iterate output operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::outputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }
    } // end of instruction iteration

    block->set_live_gen (live_gen);
    block->set_live_kill(live_kill);
    block->set_live_in  (BitMap(live_size)); block->live_in().clear();
    block->set_live_out (BitMap(live_size)); block->live_out().clear();

    TRACE_LINEAR_SCAN(4, tty->print("live_gen  B%d ", block->block_id()); print_bitmap(block->live_gen()));
    TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
  } // end of block iteration

  // propagate local calculated information into LinearScan object
  _has_fpu_registers = local_has_fpu_registers;
  compilation()->set_has_fpu_code(local_has_fpu_registers);

  _num_calls = local_num_calls;
  _interval_in_loop = local_interval_in_loop;
}
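
// In dataflow terms, live_gen(B) collects the upward-exposed uses of block B
// (operands read before any write in B) and live_kill(B) the operands written
// in B. A small example block:
//
//   v12 = v10 + v11      // v10, v11 -> live_gen;  v12 -> live_kill
//   v10 = v12            // v12 already killed, so not added to live_gen;
//                        // v10 -> live_kill (it stays in live_gen, too)
//
// giving live_gen = {v10, v11} and live_kill = {v10, v12}.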
|

// ********** Phase 3: perform a backward dataflow analysis to compute global live sets
// (sets live_in and live_out for each block)

void LinearScan::compute_global_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_global_live_sets);

  int  num_blocks = block_count();
  bool change_occurred;
  bool change_occurred_in_block;
  int  iteration_count = 0;
  BitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations

  // Perform a backward dataflow analysis to compute live_out and live_in for each block.
  // The loop is executed until a fixpoint is reached (no changes in an iteration).
  // Exception handlers must be processed because not all live values are
  // present in the state array, e.g. because of global value numbering
  do {
    change_occurred = false;

    // iterate all blocks in reverse order
    for (int i = num_blocks - 1; i >= 0; i--) {
      BlockBegin* block = block_at(i);

      change_occurred_in_block = false;

      // live_out(block) is the union of live_in(sux), for successors sux of block
      int n = block->number_of_sux();
      int e = block->number_of_exception_handlers();
      if (n + e > 0) {
        // block has successors
        if (n > 0) {
          live_out.set_from(block->sux_at(0)->live_in());
          for (int j = 1; j < n; j++) {
            live_out.set_union(block->sux_at(j)->live_in());
          }
        } else {
          live_out.clear();
        }
        for (int j = 0; j < e; j++) {
          live_out.set_union(block->exception_handler_at(j)->live_in());
        }

        if (!block->live_out().is_same(live_out)) {
          // A change occurred. Swap the old and new live out sets to avoid copying.
          BitMap temp = block->live_out();
          block->set_live_out(live_out);
          live_out = temp;

          change_occurred = true;
          change_occurred_in_block = true;
        }
      }

      if (iteration_count == 0 || change_occurred_in_block) {
        // live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
        // note: live_in has to be computed only in first iteration or if live_out has changed!
        BitMap live_in = block->live_in();
        live_in.set_from(block->live_out());
        live_in.set_difference(block->live_kill());
        live_in.set_union(block->live_gen());
      }

#ifndef PRODUCT
      if (TraceLinearScanLevel >= 4) {
        char c = ' ';
        if (iteration_count == 0 || change_occurred_in_block) {
          c = '*';
        }
        tty->print("(%d) live_in%c  B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_in());
        tty->print("(%d) live_out%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_out());
      }
#endif
    }
    iteration_count++;

    if (change_occurred && iteration_count > 50) {
      BAILOUT("too many iterations in compute_global_live_sets");
    }
  } while (change_occurred);


#ifdef ASSERT
  // check that fixed intervals are not live at block boundaries
  // (live set must be empty at fixed intervals)
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    for (int j = 0; j < LIR_OprDesc::vreg_base; j++) {
      assert(block->live_in().at(j)  == false, "live_in  set of fixed register must be empty");
      assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
      assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
    }
  }
#endif

  // check that the live_in set of the first block is empty
  BitMap live_in_args(ir()->start()->live_in().size());
  live_in_args.clear();
  if (!ir()->start()->live_in().is_same(live_in_args)) {
#ifdef ASSERT
    tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
    tty->print_cr("affected registers:");
    print_bitmap(ir()->start()->live_in());

    // print some additional information to simplify debugging
    for (unsigned int i = 0; i < ir()->start()->live_in().size(); i++) {
      if (ir()->start()->live_in().at(i)) {
        Instruction* instr = gen()->instruction_for_vreg(i);
        tty->print_cr("* vreg %d (HIR instruction %c%d)", i, instr == NULL ? ' ' : instr->type()->tchar(), instr == NULL ? 0 : instr->id());

        for (int j = 0; j < num_blocks; j++) {
          BlockBegin* block = block_at(j);
          if (block->live_gen().at(i)) {
            tty->print_cr("  used in block B%d", block->block_id());
          }
          if (block->live_kill().at(i)) {
            tty->print_cr("  defined in block B%d", block->block_id());
          }
        }
      }
    }

#endif
    // when this fails, virtual registers are used before they are defined.
    assert(false, "live_in set of first block must be empty");
    // bail out if this occurs in product mode.
    bailout("live_in set of first block not empty");
  }
}
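
// The fixpoint loop above solves the standard backward liveness equations
//
//   live_out(B) = union of live_in(S) for all successors S of B
//   live_in(B)  = live_gen(B) | (live_out(B) & ~live_kill(B))
//
// Visiting blocks in reverse linear-scan order lets most of the information
// propagate in a single pass, so the fixpoint is usually reached after a
// handful of iterations; the cap of 50 is just a safety net.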
|

// ********** Phase 4: build intervals
// (fills the list _intervals)

void LinearScan::add_use(Value value, int from, int to, IntervalUseKind use_kind) {
  assert(!value->type()->is_illegal(), "if this value is used by the interpreter it shouldn't be of indeterminate type");
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr, from, to, use_kind);
  }
}
|

void LinearScan::add_def(LIR_Opr opr, int def_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" def "); opr->print(tty); tty->print_cr(" def_pos %d (%d)", def_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_def(opr->vreg_number(), def_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_use(LIR_Opr opr, int from, int to, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" use "); opr->print(tty); tty->print_cr(" from %d to %d (%d)", from, to, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr->vreg_number(), from, to, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" temp "); opr->print(tty); tty->print_cr(" temp_pos %d (%d)", temp_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_temp(opr->vreg_number(), temp_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
  }
}
|

void LinearScan::add_def(int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval != NULL) {
    assert(interval->reg_num() == reg_num, "wrong interval");

    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    Range* r = interval->first();
    if (r->from() <= def_pos) {
      // Update the starting point (when a range is first created for a use, its
      // start is the beginning of the current block until a def is encountered.)
      r->set_from(def_pos);
      interval->add_use_pos(def_pos, use_kind);

    } else {
      // Dead value - make vacuous interval
      // also add use_kind for dead intervals
      interval->add_range(def_pos, def_pos + 1);
      interval->add_use_pos(def_pos, use_kind);
      TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
    }

  } else {
    // Dead value - make vacuous interval
    // also add use_kind for dead intervals
    interval = create_interval(reg_num);
    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    interval->add_range(def_pos, def_pos + 1);
    interval->add_use_pos(def_pos, use_kind);
    TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
  }

  change_spill_definition_pos(interval, def_pos);
  if (use_kind == noUse && interval->spill_state() <= startInMemory) {
    // detection of method-parameters and roundfp-results
    // TODO: move this directly to position where use-kind is computed
    interval->set_spill_state(startInMemory);
  }
}

void LinearScan::add_use(int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(from, to);
  interval->add_use_pos(to, use_kind);
}

void LinearScan::add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(temp_pos, temp_pos + 1);
  interval->add_use_pos(temp_pos, use_kind);
}
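
// Because build_intervals() walks blocks and instructions backwards, every
// use is processed before the corresponding def. A use conservatively starts
// the range at the beginning of the block; the later-processed def then
// shrinks it. Sketch with hypothetical op_ids and block_from == 10:
//
//   op 14: v7 = ...      // processed second: add_def trims range to [14, 20)
//   op 20: ... = v7      // processed first:  add_use(v7, 10, 20) -> [10, 20)
//
// A def with no range reaching it (r->from() > def_pos) means a dead value
// and yields the one-position "vacuous" interval handled above.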
|

// the results of these functions are used for optimizing spilling and reloading:
// if the functions return shouldHaveRegister and the interval is spilled,
// it is not reloaded to a register.
IntervalUseKind LinearScan::use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Beginning of an interval with must_start_in_memory set.
      // This interval will always get a stack slot first, so return noUse.
      return noUse;

    } else if (move->in_opr()->is_stack()) {
      // method argument (condition must be equal to handle_method_arguments)
      return noUse;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // input operand must have a register instead of output operand (leads to better register allocation)
        return shouldHaveRegister;
      }
    }
  }

  if (opr->is_virtual() &&
      gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::must_start_in_memory)) {
    // result is a stack-slot, so prevent immediate reloading
    return noUse;
  }

  // all other operands require a register
  return mustHaveRegister;
}
|

IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Move to an interval with must_start_in_memory set.
      // To avoid moves from stack to stack (not allowed) force the input operand to a register
      return mustHaveRegister;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // input operand must have a register instead of output operand (leads to better register allocation)
        return mustHaveRegister;
      }

      // The input operand is not forced to a register (moves from stack to register are allowed),
      // but it is faster if the input operand is in a register
      return shouldHaveRegister;
    }
  }


#ifdef X86
  if (op->code() == lir_cmove) {
    // conditional moves can handle stack operands
    assert(op->result_opr()->is_register(), "result must always be in a register");
    return shouldHaveRegister;
  }

  // optimizations for second input operand of arithmetic operations on Intel
  // this operand is allowed to be on the stack in some cases
  BasicType opr_type = opr->type_register();
  if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
    if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2) {
      // SSE float instruction (T_DOUBLE only supported with SSE2)
      switch (op->code()) {
        case lir_cmp:
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
      }
    } else {
      // FPU stack float instruction
      switch (op->code()) {
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
      }
    }

  } else if (opr_type != T_LONG) {
    // integer instruction (note: long operands must always be in register)
    switch (op->code()) {
      case lir_cmp:
      case lir_add:
      case lir_sub:
      case lir_logic_and:
      case lir_logic_or:
      case lir_logic_xor:
      {
        assert(op->as_Op2() != NULL, "must be LIR_Op2");
        LIR_Op2* op2 = (LIR_Op2*)op;
        if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
          assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
          return shouldHaveRegister;
        }
      }
    }
  }
#endif // X86

  // all other operands require a register
  return mustHaveRegister;
}
|

void LinearScan::handle_method_arguments(LIR_Op* op) {
  // special handling for method arguments (moves from stack to virtual register):
  // the interval gets no register assigned, but the stack slot.
  // it is split before the first use by the register allocator.

  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->in_opr()->is_stack()) {
#ifdef ASSERT
      int arg_size = compilation()->method()->arg_size();
      LIR_Opr o = move->in_opr();
      if (o->is_single_stack()) {
        assert(o->single_stack_ix() >= 0 && o->single_stack_ix() < arg_size, "out of range");
      } else if (o->is_double_stack()) {
        assert(o->double_stack_ix() >= 0 && o->double_stack_ix() < arg_size, "out of range");
      } else {
        ShouldNotReachHere();
      }

      assert(move->id() > 0, "invalid id");
      assert(block_of_op_with_id(move->id())->number_of_preds() == 0, "move from stack must be in first block");
      assert(move->result_opr()->is_virtual(), "result of move must be a virtual register");

      TRACE_LINEAR_SCAN(4, tty->print_cr("found move from stack slot %d to vreg %d", o->is_single_stack() ? o->single_stack_ix() : o->double_stack_ix(), reg_num(move->result_opr())));
#endif

      Interval* interval = interval_at(reg_num(move->result_opr()));

      int stack_slot = LinearScan::nof_regs + (move->in_opr()->is_single_stack() ? move->in_opr()->single_stack_ix() : move->in_opr()->double_stack_ix());
      interval->set_canonical_spill_slot(stack_slot);
      interval->assign_reg(stack_slot);
    }
  }
}
|
void LinearScan::handle_doubleword_moves(LIR_Op* op) {
  // special handling for doubleword move from memory to register:
  // in this case the registers of the input address and the result
  // registers must not overlap -> add a temp range for the input registers
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->result_opr()->is_double_cpu() && move->in_opr()->is_pointer()) {
      LIR_Address* address = move->in_opr()->as_address_ptr();
      if (address != NULL) {
        if (address->base()->is_valid()) {
          add_temp(address->base(), op->id(), noUse);
        }
        if (address->index()->is_valid()) {
          add_temp(address->index(), op->id(), noUse);
        }
      }
    }
  }
}
|
void LinearScan::add_register_hints(LIR_Op* op) {
  switch (op->code()) {
    case lir_move:      // fall through
    case lir_convert: {
      assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
      LIR_Op1* move = (LIR_Op1*)op;

      LIR_Opr move_from = move->in_opr();
      LIR_Opr move_to = move->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
    case lir_cmove: {
      assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
      LIR_Op2* cmove = (LIR_Op2*)op;

      LIR_Opr move_from = cmove->in_opr1();
      LIR_Opr move_to = cmove->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
  }
}
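
// Register hints are a lightweight form of move coalescing: when the
// allocator later selects a register for the hinted interval, it first tries
// the register assigned to the hint source, so a move (or the first input of
// a cmove) can often end up as a register-to-itself move and be dropped.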
|
1238 |
||
1239 |
||
1240 |
void LinearScan::build_intervals() {
  TIME_LINEAR_SCAN(timer_build_intervals);

  // initialize interval list with expected number of intervals
  // (32 is added to have some space for split children without having to resize the list)
  _intervals = IntervalList(num_virtual_regs() + 32);
  // initialize all slots that are used by build_intervals
  _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);

  // create a list with all caller-save registers (cpu, fpu, xmm)
  // when an instruction is a call, a temp range is created for all these registers
  int num_caller_save_registers = 0;
  int caller_save_registers[LinearScan::nof_regs];

  int i;
  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
    LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
    assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
    assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
    caller_save_registers[num_caller_save_registers++] = reg_num(opr);
  }

  // temp ranges for fpu registers are only created when the method has
  // virtual fpu operands. Otherwise no allocation for fpu registers is
  // performed and so the temp ranges would be useless
  if (has_fpu_registers()) {
#ifdef X86
    if (UseSSE < 2) {
#endif
      for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
        LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
        assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
        assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
        caller_save_registers[num_caller_save_registers++] = reg_num(opr);
      }
#ifdef X86
    }
    if (UseSSE > 0) {
      for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
        LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
        assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
        assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
        caller_save_registers[num_caller_save_registers++] = reg_num(opr);
      }
    }
#endif
  }
  assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");


  LIR_OpVisitState visitor;

  // iterate all blocks in reverse order
  for (i = block_count() - 1; i >= 0; i--) {
    BlockBegin* block = block_at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();
    int         block_from   = block->first_lir_instruction_id();
    int         block_to     = block->last_lir_instruction_id();

    assert(block_from == instructions->at(0)->id(), "must be");
    assert(block_to   == instructions->at(instructions->length() - 1)->id(), "must be");

    // Update intervals for registers live at the end of this block;
    BitMap live = block->live_out();
    int size = (int)live.size();
    for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
      assert(live.at(number), "should not stop here otherwise");
      assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
      TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));

      add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);

      // add special use positions for loop-end blocks when the
      // interval is used anywhere inside this loop. It's possible
      // that the block was part of a non-natural loop, so it might
      // have an invalid loop index.
      if (block->is_set(BlockBegin::linear_scan_loop_end_flag) &&
          block->loop_index() != -1 &&
          is_interval_in_loop(number, block->loop_index())) {
        interval_at(number)->add_use_pos(block_to + 1, loopEndMarker);
      }
    }

    // iterate all instructions of the block in reverse order.
    // skip the first instruction because it is always a label
    // definitions of intervals are processed before uses
    assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
    for (int j = instructions->length() - 1; j >= 1; j--) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      // visit operation to collect all operands
      visitor.visit(op);

      // add a temp range for each register if operation destroys caller-save registers
      if (visitor.has_call()) {
        for (int k = 0; k < num_caller_save_registers; k++) {
          add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
        }
        TRACE_LINEAR_SCAN(4, tty->print_cr("operation destroys all caller-save registers"));
      }

      // Add any platform dependent temps
      pd_add_temps(op);

      // visit definitions (output and temp operands)
      int k, n;
      n = visitor.opr_count(LIR_OpVisitState::outputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_def(opr, op_id, use_kind_of_output_operand(op, opr));
      }

      n = visitor.opr_count(LIR_OpVisitState::tempMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_temp(opr, op_id, mustHaveRegister);
      }

      // visit uses (input operands)
      n = visitor.opr_count(LIR_OpVisitState::inputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_use(opr, block_from, op_id, use_kind_of_input_operand(op, opr));
      }

      // Add uses of live locals from interpreter's point of view for proper
      // debug information generation
      // Treat these operands as temp values (if the life range is extended
      // to a call site, the value would be in a register at the call otherwise)
      n = visitor.info_count();
      for (k = 0; k < n; k++) {
        CodeEmitInfo* info = visitor.info_at(k);
        ValueStack* stack = info->stack();
        for_each_state_value(stack, value,
          add_use(value, block_from, op_id + 1, noUse);
        );
      }

      // special steps for some instructions (especially moves)
      handle_method_arguments(op);
      handle_doubleword_moves(op);
      add_register_hints(op);

    } // end of instruction iteration
  } // end of block iteration


  // add the range [0, 1[ to all fixed intervals
  // -> the register allocator need not handle unhandled fixed intervals
  for (int n = 0; n < LinearScan::nof_regs; n++) {
    Interval* interval = interval_at(n);
    if (interval != NULL) {
      interval->add_range(0, 1);
    }
  }
}
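
// Illustrative note on the positions used above (relying on LIR operations
// being numbered in steps of two): block_to + 2 is the begin of the first
// operation after the block, so a live-out interval is extended across the
// whole block, while the odd position block_to + 1 lies between operations
// and is only used as the loopEndMarker use position.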


// ********** Phase 5: actual register allocation

int LinearScan::interval_cmp(Interval** a, Interval** b) {
  if (*a != NULL) {
    if (*b != NULL) {
      return (*a)->from() - (*b)->from();
    } else {
      return -1;
    }
  } else {
    if (*b != NULL) {
      return 1;
    } else {
      return 0;
    }
  }
}
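
// Example: for an interval a with from() == 8 and an interval b with
// from() == 4, interval_cmp(&a, &b) returns a positive value, so b sorts
// before a. NULL entries compare greater than any interval and therefore
// sort to the end of the array.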

#ifndef PRODUCT
bool LinearScan::is_sorted(IntervalArray* intervals) {
  int from = -1;
  int i, j;
  for (i = 0; i < intervals->length(); i++) {
    Interval* it = intervals->at(i);
    if (it != NULL) {
      if (from > it->from()) {
        assert(false, "");
        return false;
      }
      from = it->from();
    }
  }

  // check in both directions if sorted list and unsorted list contain same intervals
  for (i = 0; i < interval_count(); i++) {
    if (interval_at(i) != NULL) {
      int num_found = 0;
      for (j = 0; j < intervals->length(); j++) {
        if (interval_at(i) == intervals->at(j)) {
          num_found++;
        }
      }
      assert(num_found == 1, "lists do not contain same intervals");
    }
  }
  for (j = 0; j < intervals->length(); j++) {
    int num_found = 0;
    for (i = 0; i < interval_count(); i++) {
      if (interval_at(i) == intervals->at(j)) {
        num_found++;
      }
    }
    assert(num_found == 1, "lists do not contain same intervals");
  }

  return true;
}
#endif

void LinearScan::add_to_list(Interval** first, Interval** prev, Interval* interval) {
  if (*prev != NULL) {
    (*prev)->set_next(interval);
  } else {
    *first = interval;
  }
  *prev = interval;
}

void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i)) {
  assert(is_sorted(_sorted_intervals), "interval list is not sorted");

  *list1 = *list2 = Interval::end();

  Interval* list1_prev = NULL;
  Interval* list2_prev = NULL;
  Interval* v;

  const int n = _sorted_intervals->length();
  for (int i = 0; i < n; i++) {
    v = _sorted_intervals->at(i);
    if (v == NULL) continue;

    if (is_list1(v)) {
      add_to_list(list1, &list1_prev, v);
    } else if (is_list2 == NULL || is_list2(v)) {
      add_to_list(list2, &list2_prev, v);
    }
  }

  if (list1_prev != NULL) list1_prev->set_next(Interval::end());
  if (list2_prev != NULL) list2_prev->set_next(Interval::end());

  assert(list1_prev == NULL || list1_prev->next() == Interval::end(), "linear list ends not with sentinel");
  assert(list2_prev == NULL || list2_prev->next() == Interval::end(), "linear list ends not with sentinel");
}
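
// Typical use of create_unhandled_lists above: the predicate pair partitions
// the sorted intervals into a fixed and a virtual list, e.g.
// is_precolored_cpu_interval / is_virtual_cpu_interval in allocate_registers.
// Passing NULL for is_list2 (as init_compute_oop_maps does) sends every
// interval not matched by is_list1 to list2.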

void LinearScan::sort_intervals_before_allocation() {
  TIME_LINEAR_SCAN(timer_sort_intervals_before);

  IntervalList* unsorted_list = &_intervals;
  int unsorted_len = unsorted_list->length();
  int sorted_len = 0;
  int unsorted_idx;
  int sorted_idx = 0;
  int sorted_from_max = -1;

  // calc number of items for sorted list (sorted list must not contain NULL values)
  for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
    if (unsorted_list->at(unsorted_idx) != NULL) {
      sorted_len++;
    }
  }
  IntervalArray* sorted_list = new IntervalArray(sorted_len);

  // special sorting algorithm: the original interval-list is almost sorted,
  // only some intervals are swapped. So this is much faster than a complete QuickSort
  for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
    Interval* cur_interval = unsorted_list->at(unsorted_idx);

    if (cur_interval != NULL) {
      int cur_from = cur_interval->from();

      if (sorted_from_max <= cur_from) {
        sorted_list->at_put(sorted_idx++, cur_interval);
        sorted_from_max = cur_interval->from();
      } else {
        // the assumption that the intervals are already sorted failed,
        // so this interval must be inserted at the correct position manually
        int j;
        for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
          sorted_list->at_put(j + 1, sorted_list->at(j));
        }
        sorted_list->at_put(j + 1, cur_interval);
        sorted_idx++;
      }
    }
  }
  _sorted_intervals = sorted_list;
}
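
// The list above is almost sorted already (see the comment in the loop), so
// the insertion step degenerates to a linear scan in the common case; only
// the few out-of-order intervals pay for the inner shifting loop.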

void LinearScan::sort_intervals_after_allocation() {
  TIME_LINEAR_SCAN(timer_sort_intervals_after);

  IntervalArray* old_list = _sorted_intervals;
  IntervalList*  new_list = _new_intervals_from_allocation;
  int old_len = old_list->length();
  int new_len = new_list->length();

  if (new_len == 0) {
    // no intervals have been added during allocation, so sorted list is already up to date
    return;
  }

  // conventional sort-algorithm for new intervals
  new_list->sort(interval_cmp);

  // merge old and new list (both already sorted) into one combined list
  IntervalArray* combined_list = new IntervalArray(old_len + new_len);
  int old_idx = 0;
  int new_idx = 0;

  while (old_idx + new_idx < old_len + new_len) {
    if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
      combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
      old_idx++;
    } else {
      combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
      new_idx++;
    }
  }

  _sorted_intervals = combined_list;
}
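
// The loop above is a plain two-way merge of two sorted sequences: at each
// step the element with the smaller from() position is taken from the front
// of old_list or new_list, so combined_list stays sorted without re-sorting
// the usually much larger old_list.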

void LinearScan::allocate_registers() {
  TIME_LINEAR_SCAN(timer_allocate_registers);

  Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
  Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;

  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
  if (has_fpu_registers()) {
    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
#ifdef ASSERT
  } else {
    // fpu register allocation is omitted because no virtual fpu registers are present
    // just check this again...
    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
    assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
#endif
  }

  // allocate cpu registers
  LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
  cpu_lsw.walk();
  cpu_lsw.finish_allocation();

  if (has_fpu_registers()) {
    // allocate fpu registers
    LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
    fpu_lsw.walk();
    fpu_lsw.finish_allocation();
  }
}
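
// Note: cpu and fpu intervals are allocated in two independent walks because
// they compete for disjoint register files. The fpu walk is skipped entirely
// when the method has no virtual fpu operands; the ASSERT-only call to
// create_unhandled_lists above merely verifies that no fpu interval was
// missed in that case.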


// ********** Phase 6: resolve data flow
// (insert moves at edges between blocks if intervals have been split)

// wrapper for Interval::split_child_at_op_id that performs a bailout in product mode
// instead of returning NULL
Interval* LinearScan::split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode) {
  Interval* result = interval->split_child_at_op_id(op_id, mode);
  if (result != NULL) {
    return result;
  }

  assert(false, "must find an interval, but do a clean bailout in product mode");
  result = new Interval(LIR_OprDesc::vreg_base);
  result->assign_reg(0);
  result->set_type(T_INT);
  BAILOUT_("LinearScan: interval is NULL", result);
}


Interval* LinearScan::interval_at_block_begin(BlockBegin* block, int reg_num) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), block->first_lir_instruction_id(), LIR_OpVisitState::outputMode);
}

Interval* LinearScan::interval_at_block_end(BlockBegin* block, int reg_num) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), block->last_lir_instruction_id() + 1, LIR_OpVisitState::outputMode);
}

Interval* LinearScan::interval_at_op_id(int reg_num, int op_id) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), op_id, LIR_OpVisitState::inputMode);
}
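
// Note on the three lookups above: after splitting, exactly one split child
// of an interval covers any given op_id. At a control-flow edge the relevant
// child can differ on the two sides, which is why interval_at_block_end
// queries position last_lir_instruction_id() + 1 (just past the block) while
// interval_at_block_begin queries first_lir_instruction_id().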

void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
  DEBUG_ONLY(move_resolver.check_empty());

  const int num_regs = num_virtual_regs();
  const int size = live_set_size();
  const BitMap live_at_edge = to_block->live_in();

  // visit all registers where the live_at_edge bit is set
  for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
    assert(r < num_regs, "live information set for non-existing interval");
    assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");

    Interval* from_interval = interval_at_block_end(from_block, r);
    Interval* to_interval = interval_at_block_begin(to_block, r);

    if (from_interval != to_interval && (from_interval->assigned_reg() != to_interval->assigned_reg() || from_interval->assigned_regHi() != to_interval->assigned_regHi())) {
      // need to insert move instruction
      move_resolver.add_mapping(from_interval, to_interval);
    }
  }
}

void LinearScan::resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
  if (from_block->number_of_sux() <= 1) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));

    LIR_OpList* instructions = from_block->lir()->instructions_list();
    LIR_OpBranch* branch = instructions->last()->as_OpBranch();
    if (branch != NULL) {
      // insert moves before branch
      assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
      move_resolver.set_insert_position(from_block->lir(), instructions->length() - 2);
    } else {
      move_resolver.set_insert_position(from_block->lir(), instructions->length() - 1);
    }

  } else {
    TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
#ifdef ASSERT
    assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");

    // because the number of predecessor edges matches the number of
    // successor edges, blocks that are reached by switch statements
    // may have more than one predecessor, but it is guaranteed
    // that all predecessors are the same.
    for (int i = 0; i < to_block->number_of_preds(); i++) {
      assert(from_block == to_block->pred_at(i), "all critical edges must be broken");
    }
#endif

    move_resolver.set_insert_position(to_block->lir(), 0);
  }
}
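
// Placement rule implemented above: resolving moves are appended at the end
// of the predecessor when it has at most one successor, otherwise at the
// beginning of the successor. One of the two positions is always safe
// because critical edges (a multi-successor block leading to a
// multi-predecessor block) have been split before allocation, as the
// asserts check.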

// insert necessary moves (spilling or reloading) at edges between blocks if interval has been split
void LinearScan::resolve_data_flow() {
  TIME_LINEAR_SCAN(timer_resolve_data_flow);

  int num_blocks = block_count();
  MoveResolver move_resolver(this);
  BitMap block_completed(num_blocks); block_completed.clear();
  BitMap already_resolved(num_blocks); already_resolved.clear();

  int i;
  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);

    // check if block has only one predecessor and only one successor
    if (block->number_of_preds() == 1 && block->number_of_sux() == 1 && block->number_of_exception_handlers() == 0) {
      LIR_OpList* instructions = block->lir()->instructions_list();
      assert(instructions->at(0)->code() == lir_label, "block must start with label");
      assert(instructions->last()->code() == lir_branch, "block with successors must end with branch");
      assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block with successor must end with unconditional branch");

      // check if block is empty (only label and branch)
      if (instructions->length() == 2) {
        BlockBegin* pred = block->pred_at(0);
        BlockBegin* sux = block->sux_at(0);

        // prevent optimization of two consecutive blocks
        if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
          TRACE_LINEAR_SCAN(3, tty->print_cr("**** optimizing empty block B%d (pred: B%d, sux: B%d)", block->block_id(), pred->block_id(), sux->block_id()));
          block_completed.set_bit(block->linear_scan_number());

          // directly resolve between pred and sux (without looking at the empty block between)
          resolve_collect_mappings(pred, sux, move_resolver);
          if (move_resolver.has_mappings()) {
            move_resolver.set_insert_position(block->lir(), 0);
            move_resolver.resolve_and_append_moves();
          }
        }
      }
    }
  }


  for (i = 0; i < num_blocks; i++) {
    if (!block_completed.at(i)) {
      BlockBegin* from_block = block_at(i);
      already_resolved.set_from(block_completed);

      int num_sux = from_block->number_of_sux();
      for (int s = 0; s < num_sux; s++) {
        BlockBegin* to_block = from_block->sux_at(s);

        // check for duplicate edges between the same blocks (can happen with switch blocks)
        if (!already_resolved.at(to_block->linear_scan_number())) {
          TRACE_LINEAR_SCAN(3, tty->print_cr("**** processing edge between B%d and B%d", from_block->block_id(), to_block->block_id()));
          already_resolved.set_bit(to_block->linear_scan_number());

          // collect all intervals that have been split between from_block and to_block
          resolve_collect_mappings(from_block, to_block, move_resolver);
          if (move_resolver.has_mappings()) {
            resolve_find_insert_pos(from_block, to_block, move_resolver);
            move_resolver.resolve_and_append_moves();
          }
        }
      }
    }
  }
}

void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver) {
  if (interval_at(reg_num) == NULL) {
    // if a phi function is never used, no interval is created -> ignore this
    return;
  }

  Interval* interval = interval_at_block_begin(block, reg_num);
  int reg = interval->assigned_reg();
  int regHi = interval->assigned_regHi();

  if ((reg < nof_regs && interval->always_in_memory()) ||
      (use_fpu_stack_allocation() && reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg)) {
    // the interval is split to get a short range that is located on the stack
    // in the following two cases:
    // * the interval started in memory (e.g. method parameter), but is currently in a register
    //   this is an optimization for exception handling that reduces the number of moves that
    //   are necessary for resolving the states when an exception uses this exception handler
    // * the interval would be on the fpu stack at the beginning of the exception handler
    //   this is not allowed because of the complicated fpu stack handling on Intel

    // range that will be spilled to memory
    int from_op_id = block->first_lir_instruction_id();
    int to_op_id = from_op_id + 1;  // short live range of length 1
    assert(interval->from() <= from_op_id && interval->to() >= to_op_id,
           "no split allowed between exception entry and first instruction");

    if (interval->from() != from_op_id) {
      // the part before from_op_id is unchanged
      interval = interval->split(from_op_id);
      interval->assign_reg(reg, regHi);
      append_interval(interval);
    }
    assert(interval->from() == from_op_id, "must be true now");

    Interval* spilled_part = interval;
    if (interval->to() != to_op_id) {
      // the part after to_op_id is unchanged
      spilled_part = interval->split_from_start(to_op_id);
      append_interval(spilled_part);
      move_resolver.add_mapping(spilled_part, interval);
    }
    assign_spill_slot(spilled_part);

    assert(spilled_part->from() == from_op_id && spilled_part->to() == to_op_id, "just checking");
  }
}

void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver) {
  assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
  DEBUG_ONLY(move_resolver.check_empty());

  // visit all registers where the live_in bit is set
  int size = live_set_size();
  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
    resolve_exception_entry(block, r, move_resolver);
  }

  // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  for_each_phi_fun(block, phi,
    resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
  );

  if (move_resolver.has_mappings()) {
    // insert moves after first instruction
    move_resolver.set_insert_position(block->lir(), 1);
    move_resolver.resolve_and_append_moves();
  }
}

void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver) {
  if (interval_at(reg_num) == NULL) {
    // if a phi function is never used, no interval is created -> ignore this
    return;
  }

  // the computation of to_interval is equal to resolve_collect_mappings,
  // but from_interval is more complicated because of phi functions
  BlockBegin* to_block = handler->entry_block();
  Interval* to_interval = interval_at_block_begin(to_block, reg_num);

  if (phi != NULL) {
    // phi function of the exception entry block
    // no moves are created for this phi function in the LIR_Generator, so the
    // interval at the throwing instruction must be searched using the operands
    // of the phi function
    Value from_value = phi->operand_at(handler->phi_operand());

    // with phi functions it can happen that the same from_value is used in
    // multiple mappings, so notify move-resolver that this is allowed
    move_resolver.set_multiple_reads_allowed();

    Constant* con = from_value->as_Constant();
    if (con != NULL && !con->is_pinned()) {
      // unpinned constants may have no register, so add mapping from constant to interval
      move_resolver.add_mapping(LIR_OprFact::value_type(con->type()), to_interval);
    } else {
      // search split child at the throwing op_id
      Interval* from_interval = interval_at_op_id(from_value->operand()->vreg_number(), throwing_op_id);
      move_resolver.add_mapping(from_interval, to_interval);
    }

  } else {
    // no phi function, so use reg_num also for from_interval
    // search split child at the throwing op_id
    Interval* from_interval = interval_at_op_id(reg_num, throwing_op_id);
    if (from_interval != to_interval) {
      // optimization to reduce number of moves: when to_interval is on stack and
      // the stack slot is known to be always correct, then no move is necessary
      if (!from_interval->always_in_memory() || from_interval->canonical_spill_slot() != to_interval->assigned_reg()) {
        move_resolver.add_mapping(from_interval, to_interval);
      }
    }
  }
}

void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("resolving exception handler B%d: throwing_op_id=%d", handler->entry_block()->block_id(), throwing_op_id));

  DEBUG_ONLY(move_resolver.check_empty());
  assert(handler->lir_op_id() == -1, "already processed this xhandler");
  DEBUG_ONLY(handler->set_lir_op_id(throwing_op_id));
  assert(handler->entry_code() == NULL, "code already present");

  // visit all registers where the live_in bit is set
  BlockBegin* block = handler->entry_block();
  int size = live_set_size();
  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
    resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
  }

  // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  for_each_phi_fun(block, phi,
    resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver)
  );

  if (move_resolver.has_mappings()) {
    LIR_List* entry_code = new LIR_List(compilation());
    move_resolver.set_insert_position(entry_code, 0);
    move_resolver.resolve_and_append_moves();

    entry_code->jump(handler->entry_block());
    handler->set_entry_code(entry_code);
  }
}

void LinearScan::resolve_exception_handlers() {
  MoveResolver move_resolver(this);
  LIR_OpVisitState visitor;
  int num_blocks = block_count();

  int i;
  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      resolve_exception_entry(block, move_resolver);
    }
  }

  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    LIR_List* ops = block->lir();
    int num_ops = ops->length();

    // iterate all instructions of the block. skip the first because it is always a label
    assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
    for (int j = 1; j < num_ops; j++) {
      LIR_Op* op = ops->at(j);
      int op_id = op->id();

      if (op_id != -1 && has_info(op_id)) {
        // visit operation to collect all operands
        visitor.visit(op);
        assert(visitor.info_count() > 0, "should not visit otherwise");

        XHandlers* xhandlers = visitor.all_xhandler();
        int n = xhandlers->length();
        for (int k = 0; k < n; k++) {
          resolve_exception_edge(xhandlers->handler_at(k), op_id, move_resolver);
        }

#ifdef ASSERT
      } else {
        visitor.visit(op);
        assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
#endif
      }
    }
  }
}
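
// Note: resolve_exception_handlers works in two passes. The first pass fixes
// up blocks that are themselves exception entries; the second pass visits
// every instruction that has a CodeEmitInfo and resolves the moves for each
// of its exception handlers, attaching the generated entry code to the
// XHandler.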


// ********** Phase 7: assign register numbers back to LIR
// (includes computation of debug information and oop maps)

VMReg LinearScan::vm_reg_for_interval(Interval* interval) {
  VMReg reg = interval->cached_vm_reg();
  if (!reg->is_valid()) {
    reg = vm_reg_for_operand(operand_for_interval(interval));
    interval->set_cached_vm_reg(reg);
  }
  assert(reg == vm_reg_for_operand(operand_for_interval(interval)), "wrong cached value");
  return reg;
}

VMReg LinearScan::vm_reg_for_operand(LIR_Opr opr) {
  assert(opr->is_oop(), "currently only implemented for oop operands");
  return frame_map()->regname(opr);
}


LIR_Opr LinearScan::operand_for_interval(Interval* interval) {
  LIR_Opr opr = interval->cached_opr();
  if (opr->is_illegal()) {
    opr = calc_operand_for_interval(interval);
    interval->set_cached_opr(opr);
  }

  assert(opr == calc_operand_for_interval(interval), "wrong cached value");
  return opr;
}
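
// Caching pattern used by vm_reg_for_interval and operand_for_interval above:
// the computed value is memoized on the interval, and the assert re-computes
// it on every query in debug builds to guarantee the cache never goes stale.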

LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
  int assigned_reg = interval->assigned_reg();
  BasicType type = interval->type();

  if (assigned_reg >= nof_regs) {
    // stack slot
    assert(interval->assigned_regHi() == any_reg, "must not have hi register");
    return LIR_OprFact::stack(assigned_reg - nof_regs, type);

  } else {
    // register
    switch (type) {
      case T_OBJECT: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu_oop(assigned_reg);
      }

      case T_INT: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu(assigned_reg);
      }

      case T_LONG: {
        int assigned_regHi = interval->assigned_regHi();
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(num_physical_regs(T_LONG) == 1 ||
               (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");

        assert(assigned_reg != assigned_regHi, "invalid allocation");
        assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
               "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax cannot occur)");
        assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
        if (requires_adjacent_regs(T_LONG)) {
          assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
        }

#ifdef _LP64
        return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
#else
#ifdef SPARC
        return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
#else
        return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
#endif // SPARC
#endif // LP64
      }

      case T_FLOAT: {
#ifdef X86
        if (UseSSE >= 1) {
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register");
          return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
        }
#endif

        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
      }

      case T_DOUBLE: {
#ifdef X86
        if (UseSSE >= 2) {
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
          return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
        }
#endif

#ifdef SPARC
        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
        assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
        LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
#else
        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
        LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
#endif
        return result;
      }

      default: {
        ShouldNotReachHere();
        return LIR_OprFact::illegalOpr;
      }
    }
  }
}

LIR_Opr LinearScan::canonical_spill_opr(Interval* interval) {
  assert(interval->canonical_spill_slot() >= nof_regs, "canonical spill slot not set");
  return LIR_OprFact::stack(interval->canonical_spill_slot() - nof_regs, interval->type());
}

LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprMode mode) {
  assert(opr->is_virtual(), "should not call this otherwise");

  Interval* interval = interval_at(opr->vreg_number());
  assert(interval != NULL, "interval must exist");

  if (op_id != -1) {
#ifdef ASSERT
    BlockBegin* block = block_of_op_with_id(op_id);
    if (block->number_of_sux() <= 1 && op_id == block->last_lir_instruction_id()) {
      // check if spill moves could have been appended at the end of this block, but
      // before the branch instruction; in that case the split child information for
      // this branch would be incorrect.
      LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
      if (branch != NULL) {
        if (block->live_out().at(opr->vreg_number())) {
          assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
          assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
        }
      }
    }
#endif

    // operands are not changed when an interval is split during allocation,
    // so search the right interval here
    interval = split_child_at_op_id(interval, op_id, mode);
  }

  LIR_Opr res = operand_for_interval(interval);

#ifdef X86
  // new semantics for is_last_use: not only set at the definite end of an interval,
  // but also before a hole. This may still miss some cases (e.g. for dead values),
  // but it is not necessary that the last-use information is completely correct:
  // the information is only needed for fpu stack allocation.
  if (res->is_fpu_register()) {
    if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
      assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
      res = res->make_last_use();
    }
  }
#endif

  assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");

  return res;
}


#ifdef ASSERT
// some methods used to check correctness of debug information

void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
  if (values == NULL) {
    return;
  }

  for (int i = 0; i < values->length(); i++) {
    ScopeValue* value = values->at(i);

    if (value->is_location()) {
      Location location = ((LocationValue*)value)->location();
      assert(location.where() == Location::on_stack, "value is in register");
    }
  }
}

void assert_no_register_values(GrowableArray<MonitorValue*>* values) {
  if (values == NULL) {
    return;
  }

  for (int i = 0; i < values->length(); i++) {
    MonitorValue* value = values->at(i);

    if (value->owner()->is_location()) {
      Location location = ((LocationValue*)value->owner())->location();
      assert(location.where() == Location::on_stack, "owner is in register");
    }
    assert(value->basic_lock().where() == Location::on_stack, "basic_lock is in register");
  }
}

void assert_equal(Location l1, Location l2) {
  assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
}

void assert_equal(ScopeValue* v1, ScopeValue* v2) {
  if (v1->is_location()) {
    assert(v2->is_location(), "");
    assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
  } else if (v1->is_constant_int()) {
    assert(v2->is_constant_int(), "");
    assert(((ConstantIntValue*)v1)->value() == ((ConstantIntValue*)v2)->value(), "");
  } else if (v1->is_constant_double()) {
    assert(v2->is_constant_double(), "");
    assert(((ConstantDoubleValue*)v1)->value() == ((ConstantDoubleValue*)v2)->value(), "");
  } else if (v1->is_constant_long()) {
    assert(v2->is_constant_long(), "");
    assert(((ConstantLongValue*)v1)->value() == ((ConstantLongValue*)v2)->value(), "");
  } else if (v1->is_constant_oop()) {
    assert(v2->is_constant_oop(), "");
    assert(((ConstantOopWriteValue*)v1)->value() == ((ConstantOopWriteValue*)v2)->value(), "");
  } else {
    ShouldNotReachHere();
  }
}

void assert_equal(MonitorValue* m1, MonitorValue* m2) {
  assert_equal(m1->owner(), m2->owner());
  assert_equal(m1->basic_lock(), m2->basic_lock());
}

void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
  assert(d1->scope() == d2->scope(), "not equal");
  assert(d1->bci() == d2->bci(), "not equal");

  if (d1->locals() != NULL) {
    assert(d1->locals() != NULL && d2->locals() != NULL, "not equal");
    assert(d1->locals()->length() == d2->locals()->length(), "not equal");
    for (int i = 0; i < d1->locals()->length(); i++) {
      assert_equal(d1->locals()->at(i), d2->locals()->at(i));
    }
  } else {
    assert(d1->locals() == NULL && d2->locals() == NULL, "not equal");
  }

  if (d1->expressions() != NULL) {
    assert(d1->expressions() != NULL && d2->expressions() != NULL, "not equal");
    assert(d1->expressions()->length() == d2->expressions()->length(), "not equal");
    for (int i = 0; i < d1->expressions()->length(); i++) {
      assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
    }
  } else {
    assert(d1->expressions() == NULL && d2->expressions() == NULL, "not equal");
  }

  if (d1->monitors() != NULL) {
    assert(d1->monitors() != NULL && d2->monitors() != NULL, "not equal");
    assert(d1->monitors()->length() == d2->monitors()->length(), "not equal");
    for (int i = 0; i < d1->monitors()->length(); i++) {
      assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
    }
  } else {
    assert(d1->monitors() == NULL && d2->monitors() == NULL, "not equal");
  }

  if (d1->caller() != NULL) {
    assert(d1->caller() != NULL && d2->caller() != NULL, "not equal");
    assert_equal(d1->caller(), d2->caller());
  } else {
    assert(d1->caller() == NULL && d2->caller() == NULL, "not equal");
  }
}

void check_stack_depth(CodeEmitInfo* info, int stack_end) {
  if (info->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
    Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
    switch (code) {
      case Bytecodes::_ifnull    : // fall through
      case Bytecodes::_ifnonnull : // fall through
      case Bytecodes::_ifeq      : // fall through
      case Bytecodes::_ifne      : // fall through
      case Bytecodes::_iflt      : // fall through
      case Bytecodes::_ifge      : // fall through
      case Bytecodes::_ifgt      : // fall through
      case Bytecodes::_ifle      : // fall through
      case Bytecodes::_if_icmpeq : // fall through
      case Bytecodes::_if_icmpne : // fall through
      case Bytecodes::_if_icmplt : // fall through
      case Bytecodes::_if_icmpge : // fall through
      case Bytecodes::_if_icmpgt : // fall through
      case Bytecodes::_if_icmple : // fall through
      case Bytecodes::_if_acmpeq : // fall through
      case Bytecodes::_if_acmpne :
        assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
        break;
    }
  }
}

#endif // ASSERT

IntervalWalker* LinearScan::init_compute_oop_maps() {
  // setup lists of potential oops for walking
  Interval* oop_intervals;
  Interval* non_oop_intervals;

  create_unhandled_lists(&oop_intervals, &non_oop_intervals, is_oop_interval, NULL);

  // intervals that have no oops inside need not be processed.
  // to ensure walking until the last instruction id, add a dummy interval
  // with a high operation id
  non_oop_intervals = new Interval(any_reg);
  non_oop_intervals->add_range(max_jint - 2, max_jint - 1);

  return new IntervalWalker(this, oop_intervals, non_oop_intervals);
}

OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));

  // walk before the current operation -> intervals that start at
  // the operation (= output operands of the operation) are not
  // included in the oop map
  iw->walk_before(op->id());

  int frame_size = frame_map()->framesize();
  int arg_count = frame_map()->oop_map_arg_count();
  OopMap* map = new OopMap(frame_size, arg_count);

  // Check if this is a patch site.
  bool is_patch_info = false;
  if (op->code() == lir_move) {
    assert(!is_call_site, "move must not be a call site");
    assert(op->as_Op1() != NULL, "move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    is_patch_info = move->patch_code() != lir_patch_none;
  }

  // Iterate through active intervals
  for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
    int assigned_reg = interval->assigned_reg();

    assert(interval->current_from() <= op->id() && op->id() <= interval->current_to(), "interval should not be active otherwise");
    assert(interval->assigned_regHi() == any_reg, "oop must be single word");
    assert(interval->reg_num() >= LIR_OprDesc::vreg_base, "fixed interval found");

    // Check if this range covers the instruction. Intervals that
    // start or end at the current operation are not included in the
    // oop map, except in the case of patching moves. For patching
    // moves, any intervals which end at this instruction are included
    // in the oop map since we may safepoint while doing the patch
    // before we've consumed the inputs.
    if (is_patch_info || op->id() < interval->current_to()) {

      // caller-save registers must not be included into oop-maps at calls
      assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");

      VMReg name = vm_reg_for_interval(interval);
      map->set_oop(name);

      // Spill optimization: when the stack value is guaranteed to be always correct,
      // then it must be added to the oop map even if the interval is currently in a register
      if (interval->always_in_memory() &&
          op->id() > interval->spill_definition_pos() &&
          interval->assigned_reg() != interval->canonical_spill_slot()) {
        assert(interval->spill_definition_pos() > 0, "position not set correctly");
        assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
        assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");

        map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
      }
    }
  }

  // add oops from lock stack
  assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
  int locks_count = info->stack()->locks_size();
  for (int i = 0; i < locks_count; i++) {
    map->set_oop(frame_map()->monitor_object_regname(i));
  }

  return map;
}
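
// Note on the walk above: iw->walk_before(op->id()) advances the walker up
// to, but not including, the current operation, so intervals whose first
// definition is at this operation are not yet active and stay out of the oop
// map; the patching case is the one deliberate exception for intervals that
// end exactly here.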

void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op) {
  assert(visitor.info_count() > 0, "no oop map needed");

  // compute oop_map only for first CodeEmitInfo
  // because it is (in most cases) equal for all other infos of the same operation
  CodeEmitInfo* first_info = visitor.info_at(0);
  OopMap* first_oop_map = compute_oop_map(iw, op, first_info, visitor.has_call());

  for (int i = 0; i < visitor.info_count(); i++) {
    CodeEmitInfo* info = visitor.info_at(i);
    OopMap* oop_map = first_oop_map;

    if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
      // this info has a different number of locks than the precomputed oop map
      // (possible for lock and unlock instructions) -> compute oop map with
      // correct lock information
      oop_map = compute_oop_map(iw, op, info, visitor.has_call());
    }

    if (info->_oop_map == NULL) {
      info->_oop_map = oop_map;
    } else {
      // a CodeEmitInfo cannot be shared between different LIR-instructions
      // because interval splitting can occur anywhere between two instructions
      // and so the oop maps must be different
      // -> check if the already set oop_map is exactly the one calculated for this operation
      assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
    }
  }
}

// frequently used constants
ConstantOopWriteValue LinearScan::_oop_null_scope_value = ConstantOopWriteValue(NULL);
ConstantIntValue      LinearScan::_int_m1_scope_value = ConstantIntValue(-1);
ConstantIntValue      LinearScan::_int_0_scope_value =  ConstantIntValue(0);
ConstantIntValue      LinearScan::_int_1_scope_value =  ConstantIntValue(1);
ConstantIntValue      LinearScan::_int_2_scope_value =  ConstantIntValue(2);
LocationValue         _illegal_value = LocationValue(Location());

void LinearScan::init_compute_debug_info() {
  // cache for frequently used scope values
  // (cpu registers and stack slots)
  _scope_value_cache = ScopeValueArray((LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2, NULL);
}
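
// Layout of the _scope_value_cache filled lazily by
// append_scope_value_for_operand below: two entries per cpu register,
// followed by two entries per stack slot, where each pair distinguishes
// oop from non-oop values, i.e.
//   cache_idx = regnr * 2 + (is_oop ? 1 : 0)                        for registers
//   cache_idx = (stack_idx + nof_cpu_regs) * 2 + (is_oop ? 1 : 0)   for stack slots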

MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
  Location loc;
  if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
    bailout("too large frame");
  }
  ScopeValue* object_scope_value = new LocationValue(loc);

  if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
    bailout("too large frame");
  }
  return new MonitorValue(object_scope_value, loc);
}

LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
  Location loc;
  if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
    bailout("too large frame");
  }
  return new LocationValue(loc);
}

int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  assert(opr->is_constant(), "should not be called otherwise");

  LIR_Const* c = opr->as_constant_ptr();
  BasicType t = c->type();
  switch (t) {
    case T_OBJECT: {
      jobject value = c->as_jobject();
      if (value == NULL) {
        scope_values->append(&_oop_null_scope_value);
      } else {
        scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
      }
      return 1;
    }

    case T_INT: // fall through
    case T_FLOAT: {
      int value = c->as_jint_bits();
      switch (value) {
        case -1: scope_values->append(&_int_m1_scope_value); break;
        case 0:  scope_values->append(&_int_0_scope_value); break;
        case 1:  scope_values->append(&_int_1_scope_value); break;
        case 2:  scope_values->append(&_int_2_scope_value); break;
        default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
      }
      return 1;
    }

    case T_LONG: // fall through
    case T_DOUBLE: {
      if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
        scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
        scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
      } else {
        scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
        scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
      }

      return 2;
    }

    default:
      ShouldNotReachHere();
      return -1;
  }
}

int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  if (opr->is_single_stack()) {
    int stack_idx = opr->single_stack_ix();
    bool is_oop = opr->is_oop_register();
    int cache_idx = (stack_idx + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0);

    ScopeValue* sv = _scope_value_cache.at(cache_idx);
    if (sv == NULL) {
      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
      sv = location_for_name(stack_idx, loc_type);
      _scope_value_cache.at_put(cache_idx, sv);
    }

    // check if cached value is correct
    DEBUG_ONLY(assert_equal(sv, location_for_name(stack_idx, is_oop ? Location::oop : Location::normal)));

    scope_values->append(sv);
    return 1;

  } else if (opr->is_single_cpu()) {
    bool is_oop = opr->is_oop_register();
    int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);

    ScopeValue* sv = _scope_value_cache.at(cache_idx);
    if (sv == NULL) {
      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
      VMReg rname = frame_map()->regname(opr);
      sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
      _scope_value_cache.at_put(cache_idx, sv);
    }

    // check if cached value is correct
    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));

    scope_values->append(sv);
    return 1;

#ifdef X86
  } else if (opr->is_single_xmm()) {
    VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
    LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));

    scope_values->append(sv);
    return 1;
#endif

  } else if (opr->is_single_fpu()) {
#ifdef X86
    // the exact location of fpu stack values is only known
    // during fpu stack allocation, so the stack allocator object
    // must be present
    assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
    assert(_fpu_stack_allocator != NULL, "must be present");
    opr = _fpu_stack_allocator->to_fpu_stack(opr);
#endif

    Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
    VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
    LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));

    scope_values->append(sv);
    return 1;

  } else {
    // double-size operands

    ScopeValue* first;
    ScopeValue* second;

    if (opr->is_double_stack()) {
#ifdef _LP64
      Location loc1;
      Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
      if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
        bailout("too large frame");
      }
      // Does this reverse on x86 vs. sparc?
      first = new LocationValue(loc1);
      second = &_int_0_scope_value;
#else
      Location loc1, loc2;
      if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
        bailout("too large frame");
      }
      first = new LocationValue(loc1);
      second = new LocationValue(loc2);
#endif // _LP64

    } else if (opr->is_double_cpu()) {
#ifdef _LP64
      VMReg rname_first = opr->as_register_lo()->as_VMReg();
      first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
      second = &_int_0_scope_value;
#else
      VMReg rname_first = opr->as_register_lo()->as_VMReg();
      VMReg rname_second = opr->as_register_hi()->as_VMReg();

      if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
        // lo/hi are swapped relative to first and second, so swap them
        VMReg tmp = rname_first;
        rname_first = rname_second;
|
2585 |
rname_second = tmp; |
|
2586 |
} |
|
2587 |
||
2588 |
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); |
|
2589 |
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); |
|
1066 | 2590 |
#endif //_LP64 |
2591 |
||
2592 |
||
2593 |
#ifdef X86 |
|
1 | 2594 |
} else if (opr->is_double_xmm()) { |
2595 |
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation"); |
|
2596 |
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg(); |
|
2597 |
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); |
|
2598 |
// %%% This is probably a waste but we'll keep things as they were for now |
|
2599 |
if (true) { |
|
2600 |
VMReg rname_second = rname_first->next(); |
|
2601 |
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); |
|
2602 |
} |
|
2603 |
#endif |
|
2604 |
||
2605 |
} else if (opr->is_double_fpu()) { |
|
2606 |
// On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of |
|
1066 | 2607 |
// the double as float registers in the native ordering. On X86, |
1 | 2608 |
// fpu_regnrLo is a FPU stack slot whose VMReg represents |
2609 |
// the low-order word of the double and fpu_regnrLo + 1 is the |
|
2610 |
// name for the other half. *first and *second must represent the |
|
2611 |
// least and most significant words, respectively. |
|
2612 |
||
1066 | 2613 |
#ifdef X86 |
1 | 2614 |
// the exact location of fpu stack values is only known |
2615 |
// during fpu stack allocation, so the stack allocator object |
|
2616 |
// must be present |
|
2617 |
assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)"); |
|
2618 |
assert(_fpu_stack_allocator != NULL, "must be present"); |
|
2619 |
opr = _fpu_stack_allocator->to_fpu_stack(opr); |
|
2620 |
||
2621 |
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)"); |
|
2622 |
#endif |
|
2623 |
#ifdef SPARC |
|
2624 |
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)"); |
|
2625 |
#endif |
|
2626 |
||
2627 |
VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi()); |
|
2628 |
||
2629 |
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); |
|
2630 |
// %%% This is probably a waste but we'll keep things as they were for now |
|
2631 |
if (true) { |
|
2632 |
VMReg rname_second = rname_first->next(); |
|
2633 |
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); |
|
2634 |
} |
|
2635 |
||
2636 |
} else { |
|
2637 |
ShouldNotReachHere(); |
|
2638 |
first = NULL; |
|
2639 |
second = NULL; |
|
2640 |
} |
|
2641 |
||
2642 |
assert(first != NULL && second != NULL, "must be set"); |
|
2643 |
// The convention the interpreter uses is that the second local |
|
2644 |
// holds the first raw word of the native double representation. |
|
2645 |
// This is actually reasonable, since locals and stack arrays |
|
2646 |
// grow downwards in all implementations. |
|
2647 |
// (If, on some machine, the interpreter's Java locals or stack |
|
2648 |
// were to grow upwards, the embedded doubles would be word-swapped.) |
|
2649 |
scope_values->append(second); |
|
2650 |
scope_values->append(first); |
|
2651 |
return 2; |
|
2652 |
} |
|
2653 |
} |
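
// append_scope_value below picks between two descriptions for a Value. A rough
// decision sketch (a summary of the code, not a separate algorithm):
//
//   if the Value is an unpinned Constant (possibly optimized away):
//     describe the constant itself                -> append_scope_value_for_constant
//   else:
//     ask the allocator where the virtual register lives at op_id
//     (color_lir_opr) and describe that location  -> append_scope_value_for_operand
//
// The block-boundary special case exists because spill moves inserted before a
// final branch are not part of any interval's live range, so the location at
// the first op_id of the (single) successor is the one that is actually valid.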

int LinearScan::append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values) {
  if (value != NULL) {
    LIR_Opr opr = value->operand();
    Constant* con = value->as_Constant();

    assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands (or illegal if constant is optimized away)");
    assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");

    if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
      // Unpinned constants may have a virtual operand for a part of the lifetime
      // or may be illegal when it was optimized away,
      // so always use a constant operand
      opr = LIR_OprFact::value_type(con->type());
    }
    assert(opr->is_virtual() || opr->is_constant(), "other cases not allowed here");

    if (opr->is_virtual()) {
      LIR_OpVisitState::OprMode mode = LIR_OpVisitState::inputMode;

      BlockBegin* block = block_of_op_with_id(op_id);
      if (block->number_of_sux() == 1 && op_id == block->last_lir_instruction_id()) {
        // generating debug information for the last instruction of a block.
        // if this instruction is a branch, spill moves are inserted before this branch
        // and so the wrong operand would be returned (spill moves at block boundaries are not
        // considered in the live ranges of intervals)
        // Solution: use the first op_id of the branch target block instead.
        if (block->lir()->instructions_list()->last()->as_OpBranch() != NULL) {
          if (block->live_out().at(opr->vreg_number())) {
            op_id = block->sux_at(0)->first_lir_instruction_id();
            mode = LIR_OpVisitState::outputMode;
          }
        }
      }

      // Get current location of operand
      // The operand must be live because debug information is considered when building the intervals
      // if the interval is not live, color_lir_opr will cause an assertion failure
      opr = color_lir_opr(opr, op_id, mode);
      assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "cannot have caller-save register operands at calls");

      // Append to ScopeValue array
      return append_scope_value_for_operand(opr, scope_values);

    } else {
      assert(value->as_Constant() != NULL, "all other instructions have only virtual operands");
      assert(opr->is_constant(), "operand must be constant");

      return append_scope_value_for_constant(opr, scope_values);
    }
  } else {
    // append a dummy value because real value not needed
    scope_values->append(&_illegal_value);
    return 1;
  }
}
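
// compute_debug_info_for_scope below recurses into caller scopes first, so for
// a hypothetical inlined chain a() -> b() -> c() the result is linked innermost
// to outermost:
//
//   IRScopeDebugInfo(c) --caller--> IRScopeDebugInfo(b) --caller--> IRScopeDebugInfo(a)
//
// stack_begin/locks_begin are the caller's stack and lock sizes, so each scope
// describes only its own slice of the shared expression and lock stacks.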

IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state, int cur_bci, int stack_end, int locks_end) {
  IRScopeDebugInfo* caller_debug_info = NULL;
  int stack_begin, locks_begin;

  ValueStack* caller_state = cur_scope->caller_state();
  if (caller_state != NULL) {
    // process recursively to compute outermost scope first
    stack_begin = caller_state->stack_size();
    locks_begin = caller_state->locks_size();
    caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state, cur_scope->caller_bci(), stack_begin, locks_begin);
  } else {
    stack_begin = 0;
    locks_begin = 0;
  }

  // initialize these to null.
  // If we don't need deopt info or there are no locals, expressions or monitors,
  // then these get recorded as no information, which avoids the allocation of
  // zero-length arrays.
  GrowableArray<ScopeValue*>*   locals      = NULL;
  GrowableArray<ScopeValue*>*   expressions = NULL;
  GrowableArray<MonitorValue*>* monitors    = NULL;

  // describe local variable values
  int nof_locals = cur_scope->method()->max_locals();
  if (nof_locals > 0) {
    locals = new GrowableArray<ScopeValue*>(nof_locals);

    int pos = 0;
    while (pos < nof_locals) {
      assert(pos < cur_state->locals_size(), "why not?");

      Value local = cur_state->local_at(pos);
      pos += append_scope_value(op_id, local, locals);

      assert(locals->length() == pos, "must match");
    }
    assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
    assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
  }


  // describe expression stack
  //
  // When we inline methods containing exception handlers, the
  // "lock_stacks" are changed to preserve expression stack values
  // in caller scopes when exception handlers are present. This
  // can cause callee stacks to be smaller than caller stacks.
  if (stack_end > innermost_state->stack_size()) {
    stack_end = innermost_state->stack_size();
  }

  int nof_stack = stack_end - stack_begin;
  if (nof_stack > 0) {
    expressions = new GrowableArray<ScopeValue*>(nof_stack);

    int pos = stack_begin;
    while (pos < stack_end) {
      Value expression = innermost_state->stack_at_inc(pos);
      append_scope_value(op_id, expression, expressions);

      assert(expressions->length() + stack_begin == pos, "must match");
    }
  }

  // describe monitors
  assert(locks_begin <= locks_end, "error in scope iteration");
  int nof_locks = locks_end - locks_begin;
  if (nof_locks > 0) {
    monitors = new GrowableArray<MonitorValue*>(nof_locks);
    for (int i = locks_begin; i < locks_end; i++) {
      monitors->append(location_for_monitor_index(i));
    }
  }

  return new IRScopeDebugInfo(cur_scope, cur_bci, locals, expressions, monitors, caller_debug_info);
}


void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
  if (!compilation()->needs_debug_information()) {
    return;
  }
  TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));

  IRScope* innermost_scope = info->scope();
  ValueStack* innermost_state = info->stack();

  assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");

  int stack_end = innermost_state->stack_size();
  int locks_end = innermost_state->locks_size();

  DEBUG_ONLY(check_stack_depth(info, stack_end));

  if (info->_scope_debug_info == NULL) {
    // compute debug information
    info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end);
  } else {
    // debug information already set. Check that it is correct from the current point of view
    DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state, info->bci(), stack_end, locks_end)));
  }
}
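
// The worker below does three jobs per instruction: rewrite every virtual
// operand to its allocated location, attach oop maps and debug info to
// instructions that carry a CodeEmitInfo, and null out moves that have become
// self-moves. The trailing compaction then removes the nulls in place; e.g.
// (illustrative) a list [m0, NULL, m2, NULL, m4] is compacted to [m0, m2, m4]
// and truncated to length 3.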

void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
  LIR_OpVisitState visitor;
  int num_inst = instructions->length();
  bool has_dead = false;

  for (int j = 0; j < num_inst; j++) {
    LIR_Op* op = instructions->at(j);
    if (op == NULL) {  // this can happen when spill-moves are removed in eliminate_spill_moves
      has_dead = true;
      continue;
    }
    int op_id = op->id();

    // visit instruction to get list of operands
    visitor.visit(op);

    // iterate all modes of the visitor and process all virtual operands
    for_each_visitor_mode(mode) {
      int n = visitor.opr_count(mode);
      for (int k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(mode, k);
        if (opr->is_virtual_register()) {
          visitor.set_opr_at(mode, k, color_lir_opr(opr, op_id, mode));
        }
      }
    }

    if (visitor.info_count() > 0) {
      // exception handling
      if (compilation()->has_exception_handlers()) {
        XHandlers* xhandlers = visitor.all_xhandler();
        int n = xhandlers->length();
        for (int k = 0; k < n; k++) {
          XHandler* handler = xhandlers->handler_at(k);
          if (handler->entry_code() != NULL) {
            assign_reg_num(handler->entry_code()->instructions_list(), NULL);
          }
        }
      } else {
        assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
      }

      // compute oop map
      assert(iw != NULL, "needed for compute_oop_map");
      compute_oop_map(iw, visitor, op);

      // compute debug information
      if (!use_fpu_stack_allocation()) {
        // compute debug information if fpu stack allocation is not needed.
        // when fpu stack allocation is needed, the debug information can not
        // be computed here because the exact location of fpu operands is not known
        // -> debug information is created inside the fpu stack allocator
        int n = visitor.info_count();
        for (int k = 0; k < n; k++) {
          compute_debug_info(visitor.info_at(k), op_id);
        }
      }
    }

#ifdef ASSERT
    // make sure we haven't made the op invalid.
    op->verify();
#endif

    // remove useless moves
    if (op->code() == lir_move) {
      assert(op->as_Op1() != NULL, "move must be LIR_Op1");
      LIR_Op1* move = (LIR_Op1*)op;
      LIR_Opr src = move->in_opr();
      LIR_Opr dst = move->result_opr();
      if (dst == src ||
          (!dst->is_pointer() && !src->is_pointer() &&
           src->is_same_register(dst))) {
        instructions->at_put(j, NULL);
        has_dead = true;
      }
    }
  }

  if (has_dead) {
    // iterate all instructions of the block and remove all null-values.
    int insert_point = 0;
    for (int j = 0; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      if (op != NULL) {
        if (insert_point != j) {
          instructions->at_put(insert_point, op);
        }
        insert_point++;
      }
    }
    instructions->truncate(insert_point);
  }
}

void LinearScan::assign_reg_num() {
  TIME_LINEAR_SCAN(timer_assign_reg_num);

  init_compute_debug_info();
  IntervalWalker* iw = init_compute_oop_maps();

  int num_blocks = block_count();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    assign_reg_num(block->lir()->instructions_list(), iw);
  }
}
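
// Overview of the phases driven by do_linear_scan below, in the order they run
// (a summary of the calls in the function, not additional behavior):
//   numbering:   number_instructions
//   liveness:    compute_local_live_sets, compute_global_live_sets
//   intervals:   build_intervals, sort_intervals_before_allocation
//   allocation:  allocate_registers
//   resolution:  resolve_data_flow, resolve_exception_handlers (if any),
//                propagate_spill_slots
//   rewriting:   eliminate_spill_moves, assign_reg_num
//   cleanup:     allocate_fpu_stack (Intel only), EdgeMoveOptimizer,
//                ControlFlowOptimizer
// Phases that can exhaust resources are followed by CHECK_BAILOUT().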

void LinearScan::do_linear_scan() {
  NOT_PRODUCT(_total_timer.begin_method());

  number_instructions();

  NOT_PRODUCT(print_lir(1, "Before Register Allocation"));

  compute_local_live_sets();
  compute_global_live_sets();
  CHECK_BAILOUT();

  build_intervals();
  CHECK_BAILOUT();
  sort_intervals_before_allocation();

  NOT_PRODUCT(print_intervals("Before Register Allocation"));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_before_alloc));

  allocate_registers();
  CHECK_BAILOUT();

  resolve_data_flow();
  if (compilation()->has_exception_handlers()) {
    resolve_exception_handlers();
  }
  // fill in number of spill slots into frame_map
  propagate_spill_slots();
  CHECK_BAILOUT();

  NOT_PRODUCT(print_intervals("After Register Allocation"));
  NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));

  sort_intervals_after_allocation();

  DEBUG_ONLY(verify());

  eliminate_spill_moves();
  assign_reg_num();
  CHECK_BAILOUT();

  NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));

  { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);

    if (use_fpu_stack_allocation()) {
      allocate_fpu_stack(); // Only has effect on Intel
      NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
    }
  }

  { TIME_LINEAR_SCAN(timer_optimize_lir);

    EdgeMoveOptimizer::optimize(ir()->code());
    ControlFlowOptimizer::optimize(ir()->code());
    // check that cfg is still correct after optimizations
    ir()->verify();
  }

  NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
  NOT_PRODUCT(_total_timer.end_method(this));
}


// ********** Printing functions

#ifndef PRODUCT

void LinearScan::print_timers(double total) {
  _total_timer.print(total);
}

void LinearScan::print_statistics() {
  _stat_before_alloc.print("before allocation");
  _stat_after_asign.print("after assignment of register");
  _stat_final.print("after optimization");
}

void LinearScan::print_bitmap(BitMap& b) {
  for (unsigned int i = 0; i < b.size(); i++) {
    if (b.at(i)) tty->print("%d ", i);
  }
  tty->cr();
}

void LinearScan::print_intervals(const char* label) {
  if (TraceLinearScanLevel >= 1) {
    int i;
    tty->cr();
    tty->print_cr("%s", label);

    for (i = 0; i < interval_count(); i++) {
      Interval* interval = interval_at(i);
      if (interval != NULL) {
        interval->print();
      }
    }

    tty->cr();
    tty->print_cr("--- Basic Blocks ---");
    for (i = 0; i < block_count(); i++) {
      BlockBegin* block = block_at(i);
      tty->print("B%d [%d, %d, %d, %d] ", block->block_id(), block->first_lir_instruction_id(), block->last_lir_instruction_id(), block->loop_index(), block->loop_depth());
    }
    tty->cr();
    tty->cr();
  }

  if (PrintCFGToFile) {
    CFGPrinter::print_intervals(&_intervals, label);
  }
}

void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
  if (TraceLinearScanLevel >= level) {
    tty->cr();
    tty->print_cr("%s", label);
    print_LIR(ir()->linear_scan_order());
    tty->cr();
  }

  if (level == 1 && PrintCFGToFile) {
    CFGPrinter::print_cfg(ir()->linear_scan_order(), label, hir_valid, true);
  }
}

#endif //PRODUCT


// ********** verification functions for allocation
// (check that all intervals have a correct register and that no registers are overwritten)
#ifdef ASSERT

void LinearScan::verify() {
  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying intervals ******************************************"));
  verify_intervals();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that no oops are in fixed intervals ****************"));
  verify_no_oops_in_fixed_intervals();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that unpinned constants are not alive across block boundaries"));
  verify_constants();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying register allocation ********************************"));
  verify_registers();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* no errors found **********************************************"));
}
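
// verify_intervals below checks per-interval invariants and then a pairwise
// condition: two intervals may share a physical register only if their live
// ranges do not intersect. With illustrative numbers: i1 = [10, 20) and
// i2 = [18, 30) both assigned to reg 5 intersect in [18, 20) and are reported
// as an error, while i2 = [20, 30) in reg 5 would be accepted.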

void LinearScan::verify_intervals() {
  int len = interval_count();
  bool has_error = false;

  for (int i = 0; i < len; i++) {
    Interval* i1 = interval_at(i);
    if (i1 == NULL) continue;

    i1->check_split_children();

    if (i1->reg_num() != i) {
      tty->print_cr("Interval %d is on position %d in list", i1->reg_num(), i); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->reg_num() >= LIR_OprDesc::vreg_base && i1->type() == T_ILLEGAL) {
      tty->print_cr("Interval %d has no type assigned", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->assigned_reg() == any_reg) {
      tty->print_cr("Interval %d has no register assigned", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->assigned_reg() == i1->assigned_regHi()) {
      tty->print_cr("Interval %d: low and high register equal", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (!is_processed_reg_num(i1->assigned_reg())) {
      tty->print_cr("Cannot have an Interval for an ignored register"); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->first() == Range::end()) {
      tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    for (Range* r = i1->first(); r != Range::end(); r = r->next()) {
      if (r->from() >= r->to()) {
        tty->print_cr("Interval %d has zero length range", i1->reg_num()); i1->print(); tty->cr();
        has_error = true;
      }
    }

    for (int j = i + 1; j < len; j++) {
      Interval* i2 = interval_at(j);
      if (i2 == NULL) continue;

      // special intervals that are created in MoveResolver
      // -> ignore them because the range information has no meaning there
      if (i1->from() == 1 && i1->to() == 2) continue;
      if (i2->from() == 1 && i2->to() == 2) continue;

      int r1 = i1->assigned_reg();
      int r1Hi = i1->assigned_regHi();
      int r2 = i2->assigned_reg();
      int r2Hi = i2->assigned_regHi();
      if (i1->intersects(i2) && (r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi)))) {
        tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
        i1->print(); tty->cr();
        i2->print(); tty->cr();
        has_error = true;
      }
    }
  }

  assert(has_error == false, "register allocation invalid");
}
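
// The check below visits every instruction that carries a CodeEmitInfo (and
// therefore an oop map) and asserts that no fixed (precolored) cpu interval is
// live across that point unless the instruction itself references it as an
// input or output; the oop-map machinery cannot describe oops held in fixed
// registers across such a point. The dummy interval covering
// [max_jint - 2, max_jint - 1) only forces the IntervalWalker to keep walking
// up to the last real instruction id.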

void LinearScan::verify_no_oops_in_fixed_intervals() {
  Interval* fixed_intervals;
  Interval* other_intervals;
  create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);

  // to ensure walking up to the last instruction id, add a dummy interval
  // with a high operation id
  other_intervals = new Interval(any_reg);
  other_intervals->add_range(max_jint - 2, max_jint - 1);
  IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);

  LIR_OpVisitState visitor;
  for (int i = 0; i < block_count(); i++) {
    BlockBegin* block = block_at(i);

    LIR_OpList* instructions = block->lir()->instructions_list();

    for (int j = 0; j < instructions->length(); j++) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      visitor.visit(op);

      if (visitor.info_count() > 0) {
        iw->walk_before(op->id());
        bool check_live = true;
        if (op->code() == lir_move) {
          LIR_Op1* move = (LIR_Op1*)op;
          check_live = (move->patch_code() == lir_patch_none);
        }
        LIR_OpBranch* branch = op->as_OpBranch();
        if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
          // Don't bother checking the stub in this case since the
          // exception stub will never return to normal control flow.
          check_live = false;
        }

        // Make sure none of the fixed registers is live across an
        // oopmap since we can't handle that correctly.
        if (check_live) {
          for (Interval* interval = iw->active_first(fixedKind);
               interval != Interval::end();
               interval = interval->next()) {
            if (interval->current_to() > op->id() + 1) {
              // This interval is live out of this op so make sure
              // that this interval represents some value that's
              // referenced by this op either as an input or output.
              bool ok = false;
              for_each_visitor_mode(mode) {
                int n = visitor.opr_count(mode);
                for (int k = 0; k < n; k++) {
                  LIR_Opr opr = visitor.opr_at(mode, k);
                  if (opr->is_fixed_cpu()) {
                    if (interval_at(reg_num(opr)) == interval) {
                      ok = true;
                      break;
                    }
                    int hi = reg_numHi(opr);
                    if (hi != -1 && interval_at(hi) == interval) {
                      ok = true;
                      break;
                    }
                  }
                }
              }
              assert(ok, "fixed intervals should never be live across an oopmap point");
            }
          }
        }
      }

      // oop-maps at calls do not contain registers, so check is not needed
      if (!visitor.has_call()) {

        for_each_visitor_mode(mode) {
          int n = visitor.opr_count(mode);
          for (int k = 0; k < n; k++) {
            LIR_Opr opr = visitor.opr_at(mode, k);

            if (opr->is_fixed_cpu() && opr->is_oop()) {
              // operand is a non-virtual cpu register and contains an oop
              TRACE_LINEAR_SCAN(4, op->print_on(tty); tty->print("checking operand "); opr->print(); tty->cr());

              Interval* interval = interval_at(reg_num(opr));
              assert(interval != NULL, "no interval");

              if (mode == LIR_OpVisitState::inputMode) {
                if (interval->to() >= op_id + 1) {
                  assert(interval->to() < op_id + 2 ||
                         interval->has_hole_between(op_id, op_id + 2),
                         "oop input operand live after instruction");
                }
              } else if (mode == LIR_OpVisitState::outputMode) {
                if (interval->from() <= op_id - 1) {
                  assert(interval->has_hole_between(op_id - 1, op_id),
                         "oop output operand live before instruction");
                }
              }
            }
          }
        }
      }
    }
  }
}


void LinearScan::verify_constants() {
  int num_regs = num_virtual_regs();
  int size = live_set_size();
  int num_blocks = block_count();

  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    BitMap live_at_edge = block->live_in();

    // visit all registers where the live_at_edge bit is set
    for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
      TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));

      Value value = gen()->instruction_for_vreg(r);

      assert(value != NULL, "all intervals live across block boundaries must have Value");
      assert(value->operand()->is_register() && value->operand()->is_virtual(), "value must have virtual operand");
      assert(value->operand()->vreg_number() == r, "register number must match");
      // TKR assert(value->as_Constant() == NULL || value->is_pinned(), "only pinned constants can be alive across block boundaries");
    }
  }
}
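
// RegisterVerifier re-checks the finished allocation by abstract interpretation:
// the state is simply "which interval does each physical register hold". Blocks
// are taken from a work list; when a successor is reached with a state that
// disagrees with an earlier visit, the disagreeing registers are invalidated
// (set to NULL) and the block is re-queued until a fixpoint is reached.
// One merge step, with illustrative interval numbers:
//
//   state from pred A:  reg[3] = i17, reg[4] = i20
//   state from pred B:  reg[3] = i17, reg[4] = i25
//   merged state:       reg[3] = i17, reg[4] = NULL   (reg 4 must be re-checked)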


class RegisterVerifier: public StackObj {
 private:
  LinearScan*   _allocator;
  BlockList     _work_list;      // all blocks that must be processed
  IntervalsList _saved_states;   // saved information of previous check

  // simplified access to methods of LinearScan
  Compilation*  compilation() const          { return _allocator->compilation(); }
  Interval*     interval_at(int reg_num) const { return _allocator->interval_at(reg_num); }
  int           reg_num(LIR_Opr opr) const   { return _allocator->reg_num(opr); }

  // currently, only registers are processed
  int           state_size()                 { return LinearScan::nof_regs; }

  // accessors
  IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
  void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
  void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }

  // helper functions
  IntervalList* copy(IntervalList* input_state);
  void          state_put(IntervalList* input_state, int reg, Interval* interval);
  bool          check_state(IntervalList* input_state, int reg, Interval* interval);

  void process_block(BlockBegin* block);
  void process_xhandler(XHandler* xhandler, IntervalList* input_state);
  void process_successor(BlockBegin* block, IntervalList* input_state);
  void process_operations(LIR_List* ops, IntervalList* input_state);

 public:
  RegisterVerifier(LinearScan* allocator)
    : _allocator(allocator)
    , _work_list(16)
    , _saved_states(BlockBegin::number_of_blocks(), NULL)
  { }

  void verify(BlockBegin* start);
};


// entry function from LinearScan that starts the verification
void LinearScan::verify_registers() {
  RegisterVerifier verifier(this);
  verifier.verify(block_at(0));
}


void RegisterVerifier::verify(BlockBegin* start) {
  // setup input registers (method arguments) for first block
  IntervalList* input_state = new IntervalList(state_size(), NULL);
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  for (int n = 0; n < args->length(); n++) {
    LIR_Opr opr = args->at(n);
    if (opr->is_register()) {
      Interval* interval = interval_at(reg_num(opr));

      if (interval->assigned_reg() < state_size()) {
        input_state->at_put(interval->assigned_reg(), interval);
      }
      if (interval->assigned_regHi() != LinearScan::any_reg && interval->assigned_regHi() < state_size()) {
        input_state->at_put(interval->assigned_regHi(), interval);
      }
    }
  }

  set_state_for_block(start, input_state);
  add_to_work_list(start);

  // main loop for verification
  do {
    BlockBegin* block = _work_list.at(0);
    _work_list.remove_at(0);

    process_block(block);
  } while (!_work_list.is_empty());
}

void RegisterVerifier::process_block(BlockBegin* block) {
  TRACE_LINEAR_SCAN(2, tty->cr(); tty->print_cr("process_block B%d", block->block_id()));

  // must copy state because it is modified
  IntervalList* input_state = copy(state_for_block(block));

  if (TraceLinearScanLevel >= 4) {
    tty->print_cr("Input-State of intervals:");
    tty->print("    ");
    for (int i = 0; i < state_size(); i++) {
      if (input_state->at(i) != NULL) {
        tty->print(" %4d", input_state->at(i)->reg_num());
      } else {
        tty->print("   __");
      }
    }
    tty->cr();
    tty->cr();
  }

  // process all operations of the block
  process_operations(block->lir(), input_state);

  // iterate all successors
  for (int i = 0; i < block->number_of_sux(); i++) {
    process_successor(block->sux_at(i), input_state);
  }
}

void RegisterVerifier::process_xhandler(XHandler* xhandler, IntervalList* input_state) {
  TRACE_LINEAR_SCAN(2, tty->print_cr("process_xhandler B%d", xhandler->entry_block()->block_id()));

  // must copy state because it is modified
  input_state = copy(input_state);

  if (xhandler->entry_code() != NULL) {
    process_operations(xhandler->entry_code(), input_state);
  }
  process_successor(xhandler->entry_block(), input_state);
}

void RegisterVerifier::process_successor(BlockBegin* block, IntervalList* input_state) {
  IntervalList* saved_state = state_for_block(block);

  if (saved_state != NULL) {
    // this block was already processed before.
    // check if new input_state is consistent with saved_state

    bool saved_state_correct = true;
    for (int i = 0; i < state_size(); i++) {
      if (input_state->at(i) != saved_state->at(i)) {
        // current input_state and previous saved_state assume a different
        // interval in this register -> assume that this register is invalid
        if (saved_state->at(i) != NULL) {
          // invalidate old calculation only if it assumed that
          // register was valid. when the register was already invalid,
          // then the old calculation was correct.
          saved_state_correct = false;
          saved_state->at_put(i, NULL);

          TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
        }
      }
    }

    if (saved_state_correct) {
      // already processed block with correct input_state
      TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: previous visit already correct", block->block_id()));
    } else {
      // must re-visit this block
      TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: must re-visit because input state changed", block->block_id()));
      add_to_work_list(block);
    }

  } else {
    // block was not processed before, so set initial input_state
    TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: initial visit", block->block_id()));

    set_state_for_block(block, copy(input_state));
    add_to_work_list(block);
  }
}


IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
  IntervalList* copy_state = new IntervalList(input_state->length());
  copy_state->push_all(input_state);
  return copy_state;
}

void RegisterVerifier::state_put(IntervalList* input_state, int reg, Interval* interval) {
  if (reg != LinearScan::any_reg && reg < state_size()) {
    if (interval != NULL) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = %d", reg, interval->reg_num()));
    } else if (input_state->at(reg) != NULL) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = NULL", reg));
    }

    input_state->at_put(reg, interval);
  }
}

bool RegisterVerifier::check_state(IntervalList* input_state, int reg, Interval* interval) {
  if (reg != LinearScan::any_reg && reg < state_size()) {
    if (input_state->at(reg) != interval) {
      tty->print_cr("!! Error in register allocation: register %d does not contain interval %d", reg, interval->reg_num());
      return true;
    }
  }
  return false;
}
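
// process_operations below updates the abstract state in a fixed order per
// instruction: inputs are checked against the current state (and cleared when
// marked last_use), a call invalidates all caller-save registers, exception
// handlers are processed on a copy of the state, and finally temp and output
// operands overwrite their registers. Handling xhandlers before temps and
// outputs presumably matters because an exception edge is taken before the
// instruction has produced its results.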

void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_state) {
  // visit all instructions of the block
  LIR_OpVisitState visitor;
  bool has_error = false;

  for (int i = 0; i < ops->length(); i++) {
    LIR_Op* op = ops->at(i);
    visitor.visit(op);

    TRACE_LINEAR_SCAN(4, op->print_on(tty));

    // check if input operands are correct
    int j;
    int n = visitor.opr_count(LIR_OpVisitState::inputMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
        }

        has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
        has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());

        // When an operand is marked with is_last_use, then the fpu stack allocator
        // removes the register from the fpu stack -> the register contains no value
        if (opr->is_last_use()) {
          state_put(input_state, interval->assigned_reg(),   NULL);
          state_put(input_state, interval->assigned_regHi(), NULL);
        }
      }
    }

    // invalidate all caller save registers at calls
    if (visitor.has_call()) {
      for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
      }
      for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
      }

#ifdef X86
      for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
      }
#endif
    }

    // process xhandler before output and temp operands
    XHandlers* xhandlers = visitor.all_xhandler();
    n = xhandlers->length();
    for (int k = 0; k < n; k++) {
      process_xhandler(xhandlers->handler_at(k), input_state);
    }

    // set temp operands (some operations use temp operands also as output operands, so can't set them NULL)
    n = visitor.opr_count(LIR_OpVisitState::tempMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::tempMode);
        }

        state_put(input_state, interval->assigned_reg(),   interval->split_parent());
        state_put(input_state, interval->assigned_regHi(), interval->split_parent());
      }
    }

    // set output operands
    n = visitor.opr_count(LIR_OpVisitState::outputMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::outputMode);
        }

        state_put(input_state, interval->assigned_reg(),   interval->split_parent());
        state_put(input_state, interval->assigned_regHi(), interval->split_parent());
      }
    }
  }
  assert(has_error == false, "Error in register allocation");
}

#endif // ASSERT
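
// MoveResolver orders a set of conceptually parallel moves (interval-to-interval
// mappings) so that no source is overwritten before it has been read. The
// classic problem case is a cycle, e.g. (illustrative registers)
//
//   r1 -> r2  and  r2 -> r1
//
// where neither move can safely go first; a resolver must route one value
// through a temporary location to break the cycle. The _register_blocked array
// below counts, per physical register, how many pending moves still read it.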


// **** Implementation of MoveResolver ******************************

MoveResolver::MoveResolver(LinearScan* allocator) :
  _allocator(allocator),
  _multiple_reads_allowed(false),
  _mapping_from(8),
  _mapping_from_opr(8),
  _mapping_to(8),
  _insert_list(NULL),
  _insert_idx(-1),
  _insertion_buffer()
{
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    _register_blocked[i] = 0;
  }
  DEBUG_ONLY(check_empty());
}


#ifdef ASSERT

void MoveResolver::check_empty() {
  assert(_mapping_from.length() == 0 && _mapping_from_opr.length() == 0 && _mapping_to.length() == 0, "list must be empty before and after processing");
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    assert(register_blocked(i) == 0, "register map must be empty before and after processing");
  }
  assert(_multiple_reads_allowed == false, "must have default value");
}

void MoveResolver::verify_before_resolve() {
  assert(_mapping_from.length() == _mapping_from_opr.length(), "length must be equal");
  assert(_mapping_from.length() == _mapping_to.length(), "length must be equal");
  assert(_insert_list != NULL && _insert_idx != -1, "insert position not set");

  int i, j;
  if (!_multiple_reads_allowed) {
    for (i = 0; i < _mapping_from.length(); i++) {
      for (j = i + 1; j < _mapping_from.length(); j++) {
        assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
      }
    }
  }

  for (i = 0; i < _mapping_to.length(); i++) {
    for (j = i + 1; j < _mapping_to.length(); j++) {
      assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
    }
  }


  BitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
  used_regs.clear();
  if (!_multiple_reads_allowed) {
    for (i = 0; i < _mapping_from.length(); i++) {
      Interval* it = _mapping_from.at(i);
      if (it != NULL) {
        assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
        used_regs.set_bit(it->assigned_reg());

        if (it->assigned_regHi() != LinearScan::any_reg) {
          assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
          used_regs.set_bit(it->assigned_regHi());
        }
      }
    }
  }

  used_regs.clear();
  for (i = 0; i < _mapping_to.length(); i++) {
    Interval* it = _mapping_to.at(i);
    assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
    used_regs.set_bit(it->assigned_reg());

    if (it->assigned_regHi() != LinearScan::any_reg) {
      assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
      used_regs.set_bit(it->assigned_regHi());
    }
  }

  used_regs.clear();
  for (i = 0; i < _mapping_from.length(); i++) {
    Interval* it = _mapping_from.at(i);
    if (it != NULL && it->assigned_reg() >= LinearScan::nof_regs) {
      used_regs.set_bit(it->assigned_reg());
    }
  }
  for (i = 0; i < _mapping_to.length(); i++) {
    Interval* it = _mapping_to.at(i);
    assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint from _mapping_to");
  }
}

#endif // ASSERT
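
// A worked example of the blocking counters, with illustrative registers: given
// pending moves r1 -> r3 and r1 -> r4 (legal only while _multiple_reads_allowed
// is set), block_registers leaves register_blocked(r1) == 2. save_to_process_move
// then allows a move into a register only if its counter is 0, or if it is 1 and
// the single blocker is the move's own source register pair.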
|
3658 |
||
3659 |
||
3660 |
// mark assigned_reg and assigned_regHi of the interval as blocked |
|
3661 |
void MoveResolver::block_registers(Interval* it) { |
|
3662 |
int reg = it->assigned_reg(); |
|
3663 |
if (reg < LinearScan::nof_regs) { |
|
3664 |
assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used"); |
|
3665 |
set_register_blocked(reg, 1); |
|
3666 |
} |
|
3667 |
reg = it->assigned_regHi(); |
|
3668 |
if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) { |
|
3669 |
assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used"); |
|
3670 |
set_register_blocked(reg, 1); |
|
3671 |
} |
|
3672 |
} |
|
// mark assigned_reg and assigned_regHi of the interval as unblocked
void MoveResolver::unblock_registers(Interval* it) {
  int reg = it->assigned_reg();
  if (reg < LinearScan::nof_regs) {
    assert(register_blocked(reg) > 0, "register already marked as unused");
    set_register_blocked(reg, -1);
  }
  reg = it->assigned_regHi();
  if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
    assert(register_blocked(reg) > 0, "register already marked as unused");
    set_register_blocked(reg, -1);
  }
}
|
// check if assigned_reg and assigned_regHi of the to-interval are not blocked (or only blocked by from)
bool MoveResolver::save_to_process_move(Interval* from, Interval* to) {
  int from_reg = -1;
  int from_regHi = -1;
  if (from != NULL) {
    from_reg = from->assigned_reg();
    from_regHi = from->assigned_regHi();
  }

  int reg = to->assigned_reg();
  if (reg < LinearScan::nof_regs) {
    if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
      return false;
    }
  }
  reg = to->assigned_regHi();
  if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
    if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
      return false;
    }
  }

  return true;
}
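
// Illustrative trace (hypothetical registers, not part of the algorithm):
// with the pending mappings {r1 -> r2, r2 -> r3}, block_registers leaves
// register_blocked(r1) == 1 and register_blocked(r2) == 1.
//   - r1 -> r2 is not safe yet: r2 is blocked and the blocker is not the
//     move's own source, so emitting the move would destroy a pending input.
//   - r2 -> r3 is safe: r3 is unblocked. After it is emitted, r2 is
//     unblocked and r1 -> r2 becomes safe in the next pass.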
|

void MoveResolver::create_insertion_buffer(LIR_List* list) {
  assert(!_insertion_buffer.initialized(), "overwriting existing buffer");
  _insertion_buffer.init(list);
}
|
void MoveResolver::append_insertion_buffer() {
  if (_insertion_buffer.initialized()) {
    _insertion_buffer.lir_list()->append(&_insertion_buffer);
  }
  assert(!_insertion_buffer.initialized(), "must be uninitialized now");

  _insert_list = NULL;
  _insert_idx = -1;
}
|
void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
  assert(from_interval->reg_num() != to_interval->reg_num(), "from and to interval equal");
  assert(from_interval->type() == to_interval->type(), "move between different types");
  assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
  assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");

  LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());

  if (!_multiple_reads_allowed) {
    // the last_use flag is an optimization for FPU stack allocation. When the same
    // input interval is used in more than one move, then it is too difficult to determine
    // if this move is really the last use.
    from_opr = from_opr->make_last_use();
  }
  _insertion_buffer.move(_insert_idx, from_opr, to_opr);

  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: inserted move from register %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
}
|
void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
  assert(from_opr->type() == to_interval->type(), "move between different types");
  assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
  assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");

  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
  _insertion_buffer.move(_insert_idx, from_opr, to_opr);

  TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
}
|

void MoveResolver::resolve_mappings() {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
  DEBUG_ONLY(verify_before_resolve());

  // Block all registers that are used as input operands of a move.
  // When a register is blocked, no move to this register is emitted.
  // This is necessary for detecting cycles in moves.
  int i;
  for (i = _mapping_from.length() - 1; i >= 0; i--) {
    Interval* from_interval = _mapping_from.at(i);
    if (from_interval != NULL) {
      block_registers(from_interval);
    }
  }

  int spill_candidate = -1;
  while (_mapping_from.length() > 0) {
    bool processed_interval = false;

    for (i = _mapping_from.length() - 1; i >= 0; i--) {
      Interval* from_interval = _mapping_from.at(i);
      Interval* to_interval = _mapping_to.at(i);

      if (save_to_process_move(from_interval, to_interval)) {
        // this interval can be processed because target is free
        if (from_interval != NULL) {
          insert_move(from_interval, to_interval);
          unblock_registers(from_interval);
        } else {
          insert_move(_mapping_from_opr.at(i), to_interval);
        }
        _mapping_from.remove_at(i);
        _mapping_from_opr.remove_at(i);
        _mapping_to.remove_at(i);

        processed_interval = true;
      } else if (from_interval != NULL && from_interval->assigned_reg() < LinearScan::nof_regs) {
        // this interval cannot be processed now because target is not free
        // it starts in a register, so it is a possible candidate for spilling
        spill_candidate = i;
      }
    }

    if (!processed_interval) {
      // no move could be processed because there is a cycle in the move list
      // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
      assert(spill_candidate != -1, "no interval in register for spilling found");

      // create a new spill interval and assign a stack slot to it
      Interval* from_interval = _mapping_from.at(spill_candidate);
      Interval* spill_interval = new Interval(-1);
      spill_interval->set_type(from_interval->type());

      // add a dummy range because real position is difficult to calculate
      // Note: this range is a special case when the integrity of the allocation is checked
      spill_interval->add_range(1, 2);

      // do not allocate a new spill slot for the temporary interval, but
      // use the spill slot assigned to from_interval. Otherwise moves from
      // one stack slot to another can happen (not allowed by LIR_Assembler)
      int spill_slot = from_interval->canonical_spill_slot();
      if (spill_slot < 0) {
        spill_slot = allocator()->allocate_spill_slot(type2spill_size[spill_interval->type()] == 2);
        from_interval->set_canonical_spill_slot(spill_slot);
      }
      spill_interval->assign_reg(spill_slot);
      allocator()->append_interval(spill_interval);

      TRACE_LINEAR_SCAN(4, tty->print_cr("created new Interval %d for spilling", spill_interval->reg_num()));

      // insert a move from register to stack and update the mapping
      insert_move(from_interval, spill_interval);
      _mapping_from.at_put(spill_candidate, spill_interval);
      unblock_registers(from_interval);
    }
  }

  // reset to default value
  _multiple_reads_allowed = false;

  // check that all intervals have been processed
  DEBUG_ONLY(check_empty());
}
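
// Illustrative cycle resolution (hypothetical registers r1 and r2):
// with the mappings {r1 -> r2, r2 -> r1} both sources stay blocked, so no
// move is safe and no progress is made. A spill candidate, say r1 -> r2, is
// broken up with a temporary stack slot s:
//   move r1 -> s      (the mapping becomes s -> r2; r1 is unblocked)
//   move r2 -> r1     (now safe because r1 is free; r2 is unblocked)
//   move s  -> r2     (now safe, resolves the remaining mapping)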
|

void MoveResolver::set_insert_position(LIR_List* insert_list, int insert_idx) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: setting insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
  assert(_insert_list == NULL && _insert_idx == -1, "use move_insert_position instead of set_insert_position when data already set");

  create_insertion_buffer(insert_list);
  _insert_list = insert_list;
  _insert_idx = insert_idx;
}
|
void MoveResolver::move_insert_position(LIR_List* insert_list, int insert_idx) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: moving insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));

  if (_insert_list != NULL && (insert_list != _insert_list || insert_idx != _insert_idx)) {
    // insert position changed -> resolve current mappings
    resolve_mappings();
  }

  if (insert_list != _insert_list) {
    // block changed -> append the old insertion_buffer (it is bound to a
    // specific block) and create a new one for the new block
    append_insertion_buffer();
    create_insertion_buffer(insert_list);
  }

  _insert_list = insert_list;
  _insert_idx = insert_idx;
}
|
void MoveResolver::add_mapping(Interval* from_interval, Interval* to_interval) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: adding mapping from %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));

  _mapping_from.append(from_interval);
  _mapping_from_opr.append(LIR_OprFact::illegalOpr);
  _mapping_to.append(to_interval);
}
|

void MoveResolver::add_mapping(LIR_Opr from_opr, Interval* to_interval) {
  TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: adding mapping from "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
  assert(from_opr->is_constant(), "only for constants");

  _mapping_from.append(NULL);
  _mapping_from_opr.append(from_opr);
  _mapping_to.append(to_interval);
}
|
void MoveResolver::resolve_and_append_moves() {
  if (has_mappings()) {
    resolve_mappings();
  }
  append_insertion_buffer();
}
|


// **** Implementation of Range *************************************

Range::Range(int from, int to, Range* next) :
  _from(from),
  _to(to),
  _next(next)
{
}

// initialize sentinel
Range* Range::_end = NULL;
void Range::initialize() {
  _end = new Range(max_jint, max_jint, NULL);
}
|
int Range::intersects_at(Range* r2) const {
  const Range* r1 = this;

  assert(r1 != NULL && r2 != NULL, "null ranges not allowed");
  assert(r1 != _end && r2 != _end, "empty ranges not allowed");

  do {
    if (r1->from() < r2->from()) {
      if (r1->to() <= r2->from()) {
        r1 = r1->next(); if (r1 == _end) return -1;
      } else {
        return r2->from();
      }
    } else if (r2->from() < r1->from()) {
      if (r2->to() <= r1->from()) {
        r2 = r2->next(); if (r2 == _end) return -1;
      } else {
        return r1->from();
      }
    } else { // r1->from() == r2->from()
      if (r1->from() == r1->to()) {
        r1 = r1->next(); if (r1 == _end) return -1;
      } else if (r2->from() == r2->to()) {
        r2 = r2->next(); if (r2 == _end) return -1;
      } else {
        return r1->from();
      }
    }
  } while (true);
}
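
// Illustrative example (hypothetical positions): for the range lists
//   r1: [10, 20[ -> [30, 40[        r2: [20, 25[ -> [35, 50[
// the walk advances r1 past [10, 20[ (it ends where r2 begins) and r2 past
// [20, 25[ (it ends before [30, 40[ begins); then [30, 40[ and [35, 50[
// overlap and 35, the larger of the two start positions, is returned.
// Disjoint range lists return -1.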
|
#ifndef PRODUCT
void Range::print(outputStream* out) const {
  out->print("[%d, %d[ ", _from, _to);
}
#endif
|


// **** Implementation of Interval **********************************

// initialize sentinel
Interval* Interval::_end = NULL;
void Interval::initialize() {
  Range::initialize();
  _end = new Interval(-1);
}

Interval::Interval(int reg_num) :
  _reg_num(reg_num),
  _type(T_ILLEGAL),
  _first(Range::end()),
  _use_pos_and_kinds(12),
  _current(Range::end()),
  _next(_end),
  _state(invalidState),
  _assigned_reg(LinearScan::any_reg),
  _assigned_regHi(LinearScan::any_reg),
  _cached_to(-1),
  _cached_opr(LIR_OprFact::illegalOpr),
  _cached_vm_reg(VMRegImpl::Bad()),
  _split_children(0),
  _canonical_spill_slot(-1),
  _insert_move_when_activated(false),
  _register_hint(NULL),
  _spill_state(noDefinitionFound),
  _spill_definition_pos(-1)
{
  _split_parent = this;
  _current_split_child = this;
}
|
int Interval::calc_to() {
  assert(_first != Range::end(), "interval has no range");

  Range* r = _first;
  while (r->next() != Range::end()) {
    r = r->next();
  }
  return r->to();
}
|

#ifdef ASSERT
// consistency check of split-children
void Interval::check_split_children() {
  if (_split_children.length() > 0) {
    assert(is_split_parent(), "only split parents can have children");

    for (int i = 0; i < _split_children.length(); i++) {
      Interval* i1 = _split_children.at(i);

      assert(i1->split_parent() == this, "not a split child of this interval");
      assert(i1->type() == type(), "must be equal for all split children");
      assert(i1->canonical_spill_slot() == canonical_spill_slot(), "must be equal for all split children");

      for (int j = i + 1; j < _split_children.length(); j++) {
        Interval* i2 = _split_children.at(j);

        assert(i1->reg_num() != i2->reg_num(), "same register number");

        if (i1->from() < i2->from()) {
          assert(i1->to() <= i2->from() && i1->to() < i2->to(), "intervals overlapping");
        } else {
          assert(i2->from() < i1->from(), "intervals start at same op_id");
          assert(i2->to() <= i1->from() && i2->to() < i1->to(), "intervals overlapping");
        }
      }
    }
  }
}
#endif // ASSERT
|
Interval* Interval::register_hint(bool search_split_child) const {
  if (!search_split_child) {
    return _register_hint;
  }

  if (_register_hint != NULL) {
    assert(_register_hint->is_split_parent(), "only split parents are valid hint registers");

    if (_register_hint->assigned_reg() >= 0 && _register_hint->assigned_reg() < LinearScan::nof_regs) {
      return _register_hint;

    } else if (_register_hint->_split_children.length() > 0) {
      // search the first split child that has a register assigned
      int len = _register_hint->_split_children.length();
      for (int i = 0; i < len; i++) {
        Interval* cur = _register_hint->_split_children.at(i);

        if (cur->assigned_reg() >= 0 && cur->assigned_reg() < LinearScan::nof_regs) {
          return cur;
        }
      }
    }
  }

  // no hint interval found that has a register assigned
  return NULL;
}
|

Interval* Interval::split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode) {
  assert(is_split_parent(), "can only be called for split parents");
  assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");

  Interval* result;
  if (_split_children.length() == 0) {
    result = this;
  } else {
    result = NULL;
    int len = _split_children.length();

    // in outputMode, the end of the interval (op_id == cur->to()) is not valid
    int to_offset = (mode == LIR_OpVisitState::outputMode ? 0 : 1);

    int i;
    for (i = 0; i < len; i++) {
      Interval* cur = _split_children.at(i);
      if (cur->from() <= op_id && op_id < cur->to() + to_offset) {
        if (i > 0) {
          // exchange current split child to start of list (faster access for next call)
          _split_children.at_put(i, _split_children.at(0));
          _split_children.at_put(0, cur);
        }

        // interval found
        result = cur;
        break;
      }
    }

#ifdef ASSERT
    for (i = 0; i < len; i++) {
      Interval* tmp = _split_children.at(i);
      if (tmp != result && tmp->from() <= op_id && op_id < tmp->to() + to_offset) {
        tty->print_cr("two valid result intervals found for op_id %d: %d and %d", op_id, result->reg_num(), tmp->reg_num());
        result->print();
        tmp->print();
        assert(false, "two valid result intervals found");
      }
    }
#endif
  }

  assert(result != NULL, "no matching interval found");
  assert(result->covers(op_id, mode), "op_id not covered by interval");

  return result;
}
|

// returns the last split child that ends before the given op_id
Interval* Interval::split_child_before_op_id(int op_id) {
  assert(op_id >= 0, "invalid op_id");

  Interval* parent = split_parent();
  Interval* result = NULL;

  int len = parent->_split_children.length();
  assert(len > 0, "no split children available");

  for (int i = len - 1; i >= 0; i--) {
    Interval* cur = parent->_split_children.at(i);
    if (cur->to() <= op_id && (result == NULL || result->to() < cur->to())) {
      result = cur;
    }
  }

  assert(result != NULL, "no split child found");
  return result;
}
|

// checks if op_id is covered by any split child
bool Interval::split_child_covers(int op_id, LIR_OpVisitState::OprMode mode) {
  assert(is_split_parent(), "can only be called for split parents");
  assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");

  if (_split_children.length() == 0) {
    // simple case if interval was not split
    return covers(op_id, mode);

  } else {
    // extended case: check all split children
    int len = _split_children.length();
    for (int i = 0; i < len; i++) {
      Interval* cur = _split_children.at(i);
      if (cur->covers(op_id, mode)) {
        return true;
      }
    }
    return false;
  }
}
|

// Note: use positions are sorted descending -> first use has highest index
int Interval::first_usage(IntervalUseKind min_use_kind) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}
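
// Illustration of the _use_pos_and_kinds encoding (hypothetical values):
// the list stores flat (position, kind) pairs, appended while the intervals
// are built backwards, so positions decrease towards the end of the list:
//   index:   0   1   2   3   4   5
//   value: [42,  S, 20,  M,  8,  L]      (S/M/L = use-kind constants)
// first_usage(mustHaveRegister) scans from the end: it skips (8, L) because
// the kind is too weak and returns 20, the lowest qualifying position, i.e.
// the first such use in execution order.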
|
int Interval::next_usage(IntervalUseKind min_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}
|
int Interval::next_usage_exact(IntervalUseKind exact_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}
|
int Interval::previous_usage(IntervalUseKind min_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  int prev = 0;
  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) > from) {
      return prev;
    }
    if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      prev = _use_pos_and_kinds.at(i);
    }
  }
  return prev;
}
|
void Interval::add_use_pos(int pos, IntervalUseKind use_kind) {
  assert(covers(pos, LIR_OpVisitState::inputMode), "use position not covered by live range");

  // do not add use positions for precolored intervals because
  // they are never used
  if (use_kind != noUse && reg_num() >= LIR_OprDesc::vreg_base) {
#ifdef ASSERT
    assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
    for (int i = 0; i < _use_pos_and_kinds.length(); i += 2) {
      assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
      assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
      if (i > 0) {
        assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
      }
    }
#endif

    // Note: add_use is called in descending order, so list gets sorted
    // automatically by just appending new use positions
    int len = _use_pos_and_kinds.length();
    if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
      _use_pos_and_kinds.append(pos);
      _use_pos_and_kinds.append(use_kind);
    } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
      assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
      _use_pos_and_kinds.at_put(len - 1, use_kind);
    }
  }
}
|
void Interval::add_range(int from, int to) {
  assert(from < to, "invalid range");
  assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
  assert(from <= first()->to(), "not inserting at begin of interval");

  if (first()->from() <= to) {
    // join intersecting ranges
    first()->set_from(MIN2(from, first()->from()));
    first()->set_to  (MAX2(to,   first()->to()));
  } else {
    // insert new range
    _first = new Range(from, to, first());
  }
}
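
// Illustrative example (hypothetical positions): ranges are prepended while
// the intervals are built backwards. With first() == [10, 20[:
//   add_range(4, 12)  intersects the head range and joins it to [4, 20[;
//   add_range(0, 2)   ends before the head range starts, so a new head is
//                     prepended: [0, 2[ -> [4, 20[.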
|
Interval* Interval::new_split_child() {
  // allocate new interval
  Interval* result = new Interval(-1);
  result->set_type(type());

  Interval* parent = split_parent();
  result->_split_parent = parent;
  result->set_register_hint(parent);

  // insert new interval in children-list of parent
  if (parent->_split_children.length() == 0) {
    assert(is_split_parent(), "list must be initialized at first split");

    parent->_split_children = IntervalList(4);
    parent->_split_children.append(this);
  }
  parent->_split_children.append(result);

  return result;
}
|
// split this interval at the specified position and return
// the remainder as a new interval.
//
// when an interval is split, a bi-directional link is established between the original interval
// (the split parent) and the intervals that are split off this interval (the split children)
// When a split child is split again, the newly created interval is also a direct child
// of the original parent (there is no tree of split children stored, but a flat list)
// All split children are spilled to the same stack slot (stored in _canonical_spill_slot)
//
// Note: The new interval has no valid reg_num
Interval* Interval::split(int split_pos) {
  assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");

  // allocate new interval
  Interval* result = new_split_child();

  // split the ranges
  Range* prev = NULL;
  Range* cur = _first;
  while (cur != Range::end() && cur->to() <= split_pos) {
    prev = cur;
    cur = cur->next();
  }
  assert(cur != Range::end(), "split interval after end of last range");

  if (cur->from() < split_pos) {
    result->_first = new Range(split_pos, cur->to(), cur->next());
    cur->set_to(split_pos);
    cur->set_next(Range::end());

  } else {
    assert(prev != NULL, "split before start of first range");
    result->_first = cur;
    prev->set_next(Range::end());
  }
  result->_current = result->_first;
  _cached_to = -1; // clear cached value

  // split list of use positions
  int total_len = _use_pos_and_kinds.length();
  int start_idx = total_len - 2;
  while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
    start_idx -= 2;
  }

  intStack new_use_pos_and_kinds(total_len - start_idx);
  int i;
  for (i = start_idx + 2; i < total_len; i++) {
    new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
  }

  _use_pos_and_kinds.truncate(start_idx + 2);
  result->_use_pos_and_kinds = _use_pos_and_kinds;
  _use_pos_and_kinds = new_use_pos_and_kinds;

#ifdef ASSERT
  assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
  assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
  assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");

  for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
    assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
    assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
  }
  for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
    assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
    assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
  }
#endif

  return result;
}
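
// Illustrative example (hypothetical positions): splitting an interval with
// the ranges [10, 20[ -> [30, 50[ at position 40 truncates the second range
// and moves the tail to the new child:
//   parent (this):  [10, 20[ -> [30, 40[   keeps use positions < 40
//   child (result): [40, 50[               gets use positions >= 40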
|
// split this interval at the specified position and return
// the head as a new interval (the original interval is the tail)
//
// Currently, only the first range can be split, and the new interval
// must not contain any use positions
Interval* Interval::split_from_start(int split_pos) {
  assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
  assert(split_pos > from() && split_pos < to(), "can only split inside interval");
  assert(split_pos > _first->from() && split_pos <= _first->to(), "can only split inside first range");
  assert(first_usage(noUse) > split_pos, "can not split when use positions are present");

  // allocate new interval
  Interval* result = new_split_child();

  // the newly created interval has only one range (checked by assertion above),
  // so the splitting of the ranges is very simple
  result->add_range(_first->from(), split_pos);

  if (split_pos == _first->to()) {
    assert(_first->next() != Range::end(), "must not be at end");
    _first = _first->next();
  } else {
    _first->set_from(split_pos);
  }

  return result;
}
|

// returns true if the op_id is inside the interval
bool Interval::covers(int op_id, LIR_OpVisitState::OprMode mode) const {
  Range* cur = _first;

  while (cur != Range::end() && cur->to() < op_id) {
    cur = cur->next();
  }
  if (cur != Range::end()) {
    assert(cur->to() != cur->next()->from(), "ranges not separated");

    if (mode == LIR_OpVisitState::outputMode) {
      return cur->from() <= op_id && op_id < cur->to();
    } else {
      return cur->from() <= op_id && op_id <= cur->to();
    }
  }
  return false;
}
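
// Boundary illustration (hypothetical range [10, 20[): covers(20, mode) is
// false in outputMode, where the end position is exclusive for definitions,
// but true in inputMode, where an operand may still be read at the position
// where the range ends.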
|
// returns true if the interval has any hole between hole_from and hole_to
// (even if the hole has only the length 1)
bool Interval::has_hole_between(int hole_from, int hole_to) {
  assert(hole_from < hole_to, "check");
  assert(from() <= hole_from && hole_to <= to(), "index out of interval");

  Range* cur = _first;
  while (cur != Range::end()) {
    assert(cur->to() < cur->next()->from(), "no space between ranges");

    // hole-range starts before this range -> hole
    if (hole_from < cur->from()) {
      return true;

    // hole-range completely inside this range -> no hole
    } else if (hole_to <= cur->to()) {
      return false;

    // overlapping of hole-range with this range -> hole
    } else if (hole_from <= cur->to()) {
      return true;
    }

    cur = cur->next();
  }

  return false;
}
|

#ifndef PRODUCT
void Interval::print(outputStream* out) const {
  const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
  const char* UseKind2Name[] = { "N", "L", "S", "M" };

  const char* type_name;
  LIR_Opr opr = LIR_OprFact::illegal();
  if (reg_num() < LIR_OprDesc::vreg_base) {
    type_name = "fixed";
    // need a temporary operand for fixed intervals because type() cannot be called
    if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
      opr = LIR_OprFact::single_cpu(assigned_reg());
    } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
      opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
#ifdef X86
    } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
      opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    type_name = type2name(type());
    if (assigned_reg() != -1) {
      opr = LinearScan::calc_operand_for_interval(this);
    }
  }

  out->print("%d %s ", reg_num(), type_name);
  if (opr->is_valid()) {
    out->print("\"");
    opr->print(out);
    out->print("\" ");
  }
  out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));

  // print ranges
  Range* cur = _first;
  while (cur != Range::end()) {
    cur->print(out);
    cur = cur->next();
    assert(cur != NULL, "range list not closed with range sentinel");
  }

  // print use positions
  int prev = 0;
  assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
    assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");

    out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
    prev = _use_pos_and_kinds.at(i);
  }

  out->print(" \"%s\"", SpillState2Name[spill_state()]);
  out->cr();
}
#endif
|


// **** Implementation of IntervalWalker ****************************

IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
 : _compilation(allocator->compilation())
 , _allocator(allocator)
{
  _unhandled_first[fixedKind] = unhandled_fixed_first;
  _unhandled_first[anyKind]   = unhandled_any_first;
  _active_first[fixedKind]    = Interval::end();
  _inactive_first[fixedKind]  = Interval::end();
  _active_first[anyKind]      = Interval::end();
  _inactive_first[anyKind]    = Interval::end();
  _current_position = -1;
  _current = NULL;
  next_interval();
}
|

// append interval at top of list
void IntervalWalker::append_unsorted(Interval** list, Interval* interval) {
  interval->set_next(*list); *list = interval;
}
|

// append interval in order of current range from()
void IntervalWalker::append_sorted(Interval** list, Interval* interval) {
  Interval* prev = NULL;
  Interval* cur  = *list;
  while (cur->current_from() < interval->current_from()) {
    prev = cur; cur = cur->next();
  }
  if (prev == NULL) {
    *list = interval;
  } else {
    prev->set_next(interval);
  }
  interval->set_next(cur);
}
|
void IntervalWalker::append_to_unhandled(Interval** list, Interval* interval) {
  assert(interval->from() >= current()->current_from(), "cannot append new interval before current walk position");

  Interval* prev = NULL;
  Interval* cur  = *list;
  while (cur->from() < interval->from() || (cur->from() == interval->from() && cur->first_usage(noUse) < interval->first_usage(noUse))) {
    prev = cur; cur = cur->next();
  }
  if (prev == NULL) {
    *list = interval;
  } else {
    prev->set_next(interval);
  }
  interval->set_next(cur);
}
|

inline bool IntervalWalker::remove_from_list(Interval** list, Interval* i) {
  while (*list != Interval::end() && *list != i) {
    list = (*list)->next_addr();
  }
  if (*list != Interval::end()) {
    assert(*list == i, "check");
    *list = (*list)->next();
    return true;
  } else {
    return false;
  }
}
|
void IntervalWalker::remove_from_list(Interval* i) {
  bool deleted;

  if (i->state() == activeState) {
    deleted = remove_from_list(active_first_addr(anyKind), i);
  } else {
    assert(i->state() == inactiveState, "invalid state");
    deleted = remove_from_list(inactive_first_addr(anyKind), i);
  }

  assert(deleted, "interval has not been found in list");
}
|

void IntervalWalker::walk_to(IntervalState state, int from) {
  assert (state == activeState || state == inactiveState, "wrong state");
  for_each_interval_kind(kind) {
    Interval** prev = state == activeState ? active_first_addr(kind) : inactive_first_addr(kind);
    Interval* next = *prev;
    while (next->current_from() <= from) {
      Interval* cur = next;
      next = cur->next();

      bool range_has_changed = false;
      while (cur->current_to() <= from) {
        cur->next_range();
        range_has_changed = true;
      }

      // also handle move from inactive list to active list
      range_has_changed = range_has_changed || (state == inactiveState && cur->current_from() <= from);

      if (range_has_changed) {
        // remove cur from list
        *prev = next;
        if (cur->current_at_end()) {
          // move to handled state (not maintained as a list)
          cur->set_state(handledState);
          interval_moved(cur, kind, state, handledState);
        } else if (cur->current_from() <= from) {
          // sort into active list
          append_sorted(active_first_addr(kind), cur);
          cur->set_state(activeState);
          if (*prev == cur) {
            assert(state == activeState, "check");
            prev = cur->next_addr();
          }
          interval_moved(cur, kind, state, activeState);
        } else {
          // sort into inactive list
          append_sorted(inactive_first_addr(kind), cur);
          cur->set_state(inactiveState);
          if (*prev == cur) {
            assert(state == inactiveState, "check");
            prev = cur->next_addr();
          }
          interval_moved(cur, kind, state, inactiveState);
        }
      } else {
        prev = cur->next_addr();
        continue;
      }
    }
  }
}
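
// Illustrative walk (hypothetical interval with ranges [8, 16[ -> [24, 32[):
//   walking to position 16 advances past the first range; the interval is
//   not live again until 24, so it moves to the inactive list (lifetime hole);
//   walking to 24 moves it back to the active list;
//   walking to 32 exhausts the last range and the interval becomes handled.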
|

void IntervalWalker::next_interval() {
  IntervalKind kind;
  Interval* any   = _unhandled_first[anyKind];
  Interval* fixed = _unhandled_first[fixedKind];

  if (any != Interval::end()) {
    // intervals may start at same position -> prefer fixed interval
    kind = fixed != Interval::end() && fixed->from() <= any->from() ? fixedKind : anyKind;

    assert (kind == fixedKind && fixed->from() <= any->from() ||
            kind == anyKind   && any->from() <= fixed->from(), "wrong interval!!!");
    assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");

  } else if (fixed != Interval::end()) {
    kind = fixedKind;
  } else {
    _current = NULL; return;
  }
  _current_kind = kind;
  _current = _unhandled_first[kind];
  _unhandled_first[kind] = _current->next();
  _current->set_next(Interval::end());
  _current->rewind_range();
}
|

void IntervalWalker::walk_to(int lir_op_id) {
  assert(_current_position <= lir_op_id, "can not walk backwards");
  while (current() != NULL) {
    bool is_active = current()->from() <= lir_op_id;
    int id = is_active ? current()->from() : lir_op_id;

    TRACE_LINEAR_SCAN(2, if (_current_position < id) { tty->cr(); tty->print_cr("walk_to(%d) **************************************************************", id); })

    // set _current_position prior to call of walk_to
    _current_position = id;

    // call walk_to even if _current_position == id
    walk_to(activeState, id);
    walk_to(inactiveState, id);

    if (is_active) {
      current()->set_state(activeState);
      if (activate_current()) {
        append_sorted(active_first_addr(current_kind()), current());
        interval_moved(current(), current_kind(), unhandledState, activeState);
      }

      next_interval();
    } else {
      return;
    }
  }
}
|
void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
#ifndef PRODUCT
  if (TraceLinearScanLevel >= 4) {
    #define print_state(state) \
    switch(state) {\
      case unhandledState: tty->print("unhandled"); break;\
      case activeState: tty->print("active"); break;\
      case inactiveState: tty->print("inactive"); break;\
      case handledState: tty->print("handled"); break;\
      default: ShouldNotReachHere(); \
    }

    print_state(from); tty->print(" to "); print_state(to);
    tty->fill_to(23);
    interval->print();

    #undef print_state
  }
#endif
}
|


// **** Implementation of LinearScanWalker **************************

LinearScanWalker::LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
  : IntervalWalker(allocator, unhandled_fixed_first, unhandled_any_first)
  , _move_resolver(allocator)
{
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    _spill_intervals[i] = new IntervalList(2);
  }
}
|

inline void LinearScanWalker::init_use_lists(bool only_process_use_pos) {
  for (int i = _first_reg; i <= _last_reg; i++) {
    _use_pos[i] = max_jint;

    if (!only_process_use_pos) {
      _block_pos[i] = max_jint;
      _spill_intervals[i]->clear();
    }
  }
}
|
inline void LinearScanWalker::exclude_from_use(int reg) {
  assert(reg < LinearScan::nof_regs, "interval must have a register assigned (stack slots not allowed)");
  if (reg >= _first_reg && reg <= _last_reg) {
    _use_pos[reg] = 0;
  }
}
inline void LinearScanWalker::exclude_from_use(Interval* i) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");

  exclude_from_use(i->assigned_reg());
  exclude_from_use(i->assigned_regHi());
}
|
inline void LinearScanWalker::set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos) {
  assert(use_pos != 0, "must use exclude_from_use to set use_pos to 0");

  if (reg >= _first_reg && reg <= _last_reg) {
    if (_use_pos[reg] > use_pos) {
      _use_pos[reg] = use_pos;
    }
    if (!only_process_use_pos) {
      _spill_intervals[reg]->append(i);
    }
  }
}
inline void LinearScanWalker::set_use_pos(Interval* i, int use_pos, bool only_process_use_pos) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");
  if (use_pos != -1) {
    set_use_pos(i->assigned_reg(), i, use_pos, only_process_use_pos);
    set_use_pos(i->assigned_regHi(), i, use_pos, only_process_use_pos);
  }
}
|
inline void LinearScanWalker::set_block_pos(int reg, Interval* i, int block_pos) {
  if (reg >= _first_reg && reg <= _last_reg) {
    if (_block_pos[reg] > block_pos) {
      _block_pos[reg] = block_pos;
    }
    if (_use_pos[reg] > block_pos) {
      _use_pos[reg] = block_pos;
    }
  }
}
inline void LinearScanWalker::set_block_pos(Interval* i, int block_pos) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");
  if (block_pos != -1) {
    set_block_pos(i->assigned_reg(), i, block_pos);
    set_block_pos(i->assigned_regHi(), i, block_pos);
  }
}
|

void LinearScanWalker::free_exclude_active_fixed() {
  Interval* list = active_first(fixedKind);
  while (list != Interval::end()) {
    assert(list->assigned_reg() < LinearScan::nof_regs, "active interval must have a register assigned");
    exclude_from_use(list);
    list = list->next();
  }
}

void LinearScanWalker::free_exclude_active_any() {
  Interval* list = active_first(anyKind);
  while (list != Interval::end()) {
    exclude_from_use(list);
    list = list->next();
  }
}
|
void LinearScanWalker::free_collect_inactive_fixed(Interval* cur) {
  Interval* list = inactive_first(fixedKind);
  while (list != Interval::end()) {
    if (cur->to() <= list->current_from()) {
      assert(list->current_intersects_at(cur) == -1, "must not intersect");
      set_use_pos(list, list->current_from(), true);
    } else {
      set_use_pos(list, list->current_intersects_at(cur), true);
    }
    list = list->next();
  }
}

void LinearScanWalker::free_collect_inactive_any(Interval* cur) {
  Interval* list = inactive_first(anyKind);
  while (list != Interval::end()) {
    set_use_pos(list, list->current_intersects_at(cur), true);
    list = list->next();
  }
}
|
void LinearScanWalker::free_collect_unhandled(IntervalKind kind, Interval* cur) {
  Interval* list = unhandled_first(kind);
  while (list != Interval::end()) {
    set_use_pos(list, list->intersects_at(cur), true);
    if (kind == fixedKind && cur->to() <= list->from()) {
      set_use_pos(list, list->from(), true);
    }
    list = list->next();
  }
}
|
void LinearScanWalker::spill_exclude_active_fixed() {
  Interval* list = active_first(fixedKind);
  while (list != Interval::end()) {
    exclude_from_use(list);
    list = list->next();
  }
}

void LinearScanWalker::spill_block_unhandled_fixed(Interval* cur) {
  Interval* list = unhandled_first(fixedKind);
  while (list != Interval::end()) {
    set_block_pos(list, list->intersects_at(cur));
    list = list->next();
  }
}
|
void LinearScanWalker::spill_block_inactive_fixed(Interval* cur) {
  Interval* list = inactive_first(fixedKind);
  while (list != Interval::end()) {
    if (cur->to() > list->current_from()) {
      set_block_pos(list, list->current_intersects_at(cur));
    } else {
      assert(list->current_intersects_at(cur) == -1, "invalid optimization: intervals intersect");
    }

    list = list->next();
  }
}
|
void LinearScanWalker::spill_collect_active_any() {
  Interval* list = active_first(anyKind);
  while (list != Interval::end()) {
    set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
    list = list->next();
  }
}

void LinearScanWalker::spill_collect_inactive_any(Interval* cur) {
  Interval* list = inactive_first(anyKind);
  while (list != Interval::end()) {
    if (list->current_intersects(cur)) {
      set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
    }
    list = list->next();
  }
}
|

void LinearScanWalker::insert_move(int op_id, Interval* src_it, Interval* dst_it) {
  // output all moves here. When source and target are equal, the move is
  // optimized away later in assign_reg_nums
  op_id = (op_id + 1) & ~1;
  BlockBegin* op_block = allocator()->block_of_op_with_id(op_id);
  assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");

  // calculate index of instruction inside instruction list of current block
  // the minimal index (for a block with no spill moves) can be calculated because the
  // numbering of instructions is known.
  // When the block already contains spill moves, the index must be increased until the
  // correct index is reached.
  LIR_OpList* list = op_block->lir()->instructions_list();
  int index = (op_id - list->at(0)->id()) / 2;
  assert(list->at(index)->id() <= op_id, "error in calculation");

  while (list->at(index)->id() != op_id) {
    index++;
    assert(0 <= index && index < list->length(), "index out of bounds");
  }
  assert(1 <= index && index < list->length(), "index out of bounds");
  assert(list->at(index)->id() == op_id, "error in calculation");

  // insert new instruction before instruction at position index
  _move_resolver.move_insert_position(op_block->lir(), index - 1);
  _move_resolver.add_mapping(src_it, dst_it);
}
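
// Illustrative index calculation (hypothetical ids): LIR instructions get
// even op_ids, so (op_id + 1) & ~1 rounds an odd position like 17 up to the
// instruction at 18. If the block's first instruction has id 10, the minimal
// index is (18 - 10) / 2 == 4; spill moves inserted earlier (whose ids do
// not follow the even numbering) shift the instruction to a higher index,
// which the while loop above skips over.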
|
4895 |
||
4896 |
||
4897 |
int LinearScanWalker::find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos) { |
|
4898 |
int from_block_nr = min_block->linear_scan_number(); |
|
4899 |
int to_block_nr = max_block->linear_scan_number(); |
|
4900 |
||
4901 |
assert(0 <= from_block_nr && from_block_nr < block_count(), "out of range"); |
|
4902 |
assert(0 <= to_block_nr && to_block_nr < block_count(), "out of range"); |
|
4903 |
assert(from_block_nr < to_block_nr, "must cross block boundary"); |
|
4904 |
||
4905 |
// Try to split at end of max_block. If this would be after |
|
4906 |
// max_split_pos, then use the begin of max_block |
|
4907 |
int optimal_split_pos = max_block->last_lir_instruction_id() + 2; |
|
4908 |
if (optimal_split_pos > max_split_pos) { |
|
4909 |
optimal_split_pos = max_block->first_lir_instruction_id(); |
|
4910 |
} |
|
4911 |
||
4912 |
int min_loop_depth = max_block->loop_depth(); |
|
4913 |
for (int i = to_block_nr - 1; i >= from_block_nr; i--) { |
|
4914 |
BlockBegin* cur = block_at(i); |
|
4915 |
||
4916 |
if (cur->loop_depth() < min_loop_depth) { |
|
4917 |
// block with lower loop-depth found -> split at the end of this block |
|
4918 |
min_loop_depth = cur->loop_depth(); |
|
4919 |
optimal_split_pos = cur->last_lir_instruction_id() + 2; |
|
4920 |
} |
|
4921 |
} |
|
4922 |
assert(optimal_split_pos > allocator()->max_lir_op_id() || allocator()->is_block_begin(optimal_split_pos), "algorithm must move split pos to block boundary"); |
|
4923 |
||
4924 |
return optimal_split_pos; |
|
4925 |
} |
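
// Illustrative choice (hypothetical blocks): for candidate blocks
// B1 (loop depth 0), B2 (depth 1), B3 (depth 1), the backwards scan prefers
// the end of B1 over the end of B3, because a split at the shallower loop
// depth places the resulting spill moves outside the loop, where they are
// executed less often.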
|

int LinearScanWalker::find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization) {
  int optimal_split_pos = -1;
  if (min_split_pos == max_split_pos) {
    // trivial case, no optimization of split position possible
    TRACE_LINEAR_SCAN(4, tty->print_cr("      min-pos and max-pos are equal, no optimization possible"));
    optimal_split_pos = min_split_pos;

  } else {
    assert(min_split_pos < max_split_pos, "must be true then");
    assert(min_split_pos > 0, "cannot access min_split_pos - 1 otherwise");

    // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
    // beginning of a block, then min_split_pos is also a possible split position.
    // Use the block before as min_block, because then min_block->last_lir_instruction_id() + 2 == min_split_pos
    BlockBegin* min_block = allocator()->block_of_op_with_id(min_split_pos - 1);

    // reason for using max_split_pos - 1: otherwise there would be an assertion failure
    // when an interval ends at the end of the last block of the method
    // (in this case, max_split_pos == allocator()->max_lir_op_id() + 2, and there is no
    // block at this op_id)
    BlockBegin* max_block = allocator()->block_of_op_with_id(max_split_pos - 1);

    assert(min_block->linear_scan_number() <= max_block->linear_scan_number(), "invalid order");
    if (min_block == max_block) {
      // split position cannot be moved to block boundary, so split as late as possible
      TRACE_LINEAR_SCAN(4, tty->print_cr("      cannot move split pos to block boundary because min_pos and max_pos are in same block"));
      optimal_split_pos = max_split_pos;

    } else if (it->has_hole_between(max_split_pos - 1, max_split_pos) && !allocator()->is_block_begin(max_split_pos)) {
      // Do not move split position if the interval has a hole before max_split_pos.
      // Intervals resulting from Phi-Functions have more than one definition (marked
      // as mustHaveRegister) with a hole before each definition. When the register is needed
      // for the second definition, an earlier reloading is unnecessary.
      TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has hole just before max_split_pos, so splitting at max_split_pos"));
      optimal_split_pos = max_split_pos;

    } else {
      // search optimal block boundary between min_split_pos and max_split_pos
      TRACE_LINEAR_SCAN(4, tty->print_cr("      moving split pos to optimal block boundary between block B%d and B%d", min_block->block_id(), max_block->block_id()));

      if (do_loop_optimization) {
        // Loop optimization: if a loop-end marker is found between min- and max-position,
        // then split before this loop
        int loop_end_pos = it->next_usage_exact(loopEndMarker, min_block->last_lir_instruction_id() + 2);
        TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization: loop end found at pos %d", loop_end_pos));

        assert(loop_end_pos > min_split_pos, "invalid order");
        if (loop_end_pos < max_split_pos) {
          // loop-end marker found between min- and max-position
          // if it is not the end marker for the same loop as the min-position, then move
          // the max-position to this loop block.
          // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
          // of the interval (normally, only mustHaveRegister causes a reloading)
          BlockBegin* loop_block = allocator()->block_of_op_with_id(loop_end_pos);

          TRACE_LINEAR_SCAN(4, tty->print_cr("      interval is used in loop that ends in block B%d, so trying to move max_block back from B%d to B%d", loop_block->block_id(), max_block->block_id(), loop_block->block_id()));
          assert(loop_block != min_block, "loop_block and min_block must be different because block boundary is needed between");

          optimal_split_pos = find_optimal_split_pos(min_block, loop_block, loop_block->last_lir_instruction_id() + 2);
          if (optimal_split_pos == loop_block->last_lir_instruction_id() + 2) {
            optimal_split_pos = -1;
            TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization not necessary"));
          } else {
            TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization successful"));
          }
        }
      }

      if (optimal_split_pos == -1) {
        // not calculated by loop optimization
        optimal_split_pos = find_optimal_split_pos(min_block, max_block, max_split_pos);
      }
    }
  }
  TRACE_LINEAR_SCAN(4, tty->print_cr("      optimal split position: %d", optimal_split_pos));

  return optimal_split_pos;
}
|
5006 |
||
5007 |
||
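// Note on op_id parity, used by the splitting code below: LIR operations are
// numbered with even op_ids, so an odd position denotes the gap between two
// instructions, and a block boundary is always even. An even split position in
// the middle of a block is therefore moved to the preceding odd position with
// the bit trick (pos - 1) | 1, e.g. 10 -> 9, while an already odd position 9
// stays 9.
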
/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos in two parts:
  1) the left part already has a location assigned
  2) the right part is sorted into the unhandled list
*/
void LinearScanWalker::split_before_usage(Interval* it, int min_split_pos, int max_split_pos) {
  TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting interval: "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));

  assert(it->from() < min_split_pos,         "cannot split at start of interval");
  assert(current_position() < min_split_pos, "cannot split before current position");
  assert(min_split_pos <= max_split_pos,     "invalid order");
  assert(max_split_pos <= it->to(),          "cannot split after end of interval");

  int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, true);

  assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
  assert(optimal_split_pos <= it->to(),  "cannot split after end of interval");
  assert(optimal_split_pos > it->from(), "cannot split at start of interval");

  if (optimal_split_pos == it->to() && it->next_usage(mustHaveRegister, min_split_pos) == max_jint) {
    // the split position would be just before the end of the interval
    // -> no split at all necessary
    TRACE_LINEAR_SCAN(4, tty->print_cr("      no split necessary because optimal split position is at end of interval"));
    return;
  }

  // must calculate this before the actual split is performed and before split position is moved to odd op_id
  bool move_necessary = !allocator()->is_block_begin(optimal_split_pos) && !it->has_hole_between(optimal_split_pos - 1, optimal_split_pos);

  if (!allocator()->is_block_begin(optimal_split_pos)) {
    // move position before actual instruction (odd op_id)
    optimal_split_pos = (optimal_split_pos - 1) | 1;
  }

  TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
  assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
  assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");

  Interval* split_part = it->split(optimal_split_pos);

  allocator()->append_interval(split_part);
  allocator()->copy_register_flags(it, split_part);
  split_part->set_insert_move_when_activated(move_necessary);
  append_to_unhandled(unhandled_first_addr(anyKind), split_part);

  TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts (insert_move_when_activated: %d)", move_necessary));
  TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print   ("      "); split_part->print());
}

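// A note on the move_necessary flag computed above: if the split position lies
// inside a block and the interval has no lifetime hole there, the value is live
// across the split, so a move between the two interval halves must be inserted
// when the right-hand part is activated. With a hole at the split position no
// value is live and no move is needed; splits exactly at block boundaries are
// connected later by the separate data-flow resolution phase.
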
/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos in two parts:
  1) the left part already has a location assigned
  2) the right part is always on the stack and therefore ignored in further processing
*/
void LinearScanWalker::split_for_spilling(Interval* it) {
  // calculate allowed range of splitting position
  int max_split_pos = current_position();
  int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, max_split_pos) + 1, it->from());

  TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting and spilling interval: "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));

  assert(it->state() == activeState,     "why spill interval that is not active?");
  assert(it->from() <= min_split_pos,    "cannot split before start of interval");
  assert(min_split_pos <= max_split_pos, "invalid order");
  assert(max_split_pos < it->to(),       "cannot split at end of interval");
  assert(current_position() < it->to(),  "interval must not end before current position");

  if (min_split_pos == it->from()) {
    // the whole interval is never used, so spill it entirely to memory
    TRACE_LINEAR_SCAN(2, tty->print_cr("      spilling entire interval because split pos is at beginning of interval"));
    assert(it->first_usage(shouldHaveRegister) > current_position(), "interval must not have use position before current_position");

    allocator()->assign_spill_slot(it);
    allocator()->change_spill_state(it, min_split_pos);

    // Also kick parent intervals out of register to memory when they have no use
    // position. This avoids a short interval in a register surrounded by intervals in
    // memory -> avoid useless moves from memory to register and back
    Interval* parent = it;
    while (parent != NULL && parent->is_split_child()) {
      parent = parent->split_child_before_op_id(parent->from());

      if (parent->assigned_reg() < LinearScan::nof_regs) {
        if (parent->first_usage(shouldHaveRegister) == max_jint) {
          // parent is never used, so kick it out of its assigned register
          TRACE_LINEAR_SCAN(4, tty->print_cr("      kicking interval %d out of its register because it is never used", parent->reg_num()));
          allocator()->assign_spill_slot(parent);
        } else {
          // do not go further back because the register is actually used by the interval
          parent = NULL;
        }
      }
    }

  } else {
    // search optimal split pos, split interval and spill only the right hand part
    int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, false);

    assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
    assert(optimal_split_pos < it->to(),    "cannot split at end of interval");
    assert(optimal_split_pos >= it->from(), "cannot split before start of interval");

    if (!allocator()->is_block_begin(optimal_split_pos)) {
      // move position before actual instruction (odd op_id)
      optimal_split_pos = (optimal_split_pos - 1) | 1;
    }

    TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
    assert(allocator()->is_block_begin(optimal_split_pos)  || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
    assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");

    Interval* spilled_part = it->split(optimal_split_pos);
    allocator()->append_interval(spilled_part);
    allocator()->assign_spill_slot(spilled_part);
    allocator()->change_spill_state(spilled_part, optimal_split_pos);

    if (!allocator()->is_block_begin(optimal_split_pos)) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("      inserting move from interval %d to %d", it->reg_num(), spilled_part->reg_num()));
      insert_move(optimal_split_pos, it, spilled_part);
    }

    // the current_split_child is needed later when moves are inserted for reloading
    assert(spilled_part->current_split_child() == it, "overwriting wrong current_split_child");
    spilled_part->make_current_split_child();

    TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts"));
    TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
    TRACE_LINEAR_SCAN(2, tty->print   ("      "); spilled_part->print());
  }
}

void LinearScanWalker::split_stack_interval(Interval* it) {
  int min_split_pos = current_position() + 1;
  int max_split_pos = MIN2(it->first_usage(shouldHaveRegister), it->to());

  split_before_usage(it, min_split_pos, max_split_pos);
}

void LinearScanWalker::split_when_partial_register_available(Interval* it, int register_available_until) {
  int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, register_available_until), it->from() + 1);
  int max_split_pos = register_available_until;

  split_before_usage(it, min_split_pos, max_split_pos);
}

void LinearScanWalker::split_and_spill_interval(Interval* it) {
  assert(it->state() == activeState || it->state() == inactiveState, "other states not allowed");

  int current_pos = current_position();
  if (it->state() == inactiveState) {
    // the interval is currently inactive, so no spill slot is needed for now.
    // when the split part is activated, the interval has a new chance to get a register,
    // so in the best case no stack slot is necessary
    assert(it->has_hole_between(current_pos - 1, current_pos + 1), "interval cannot be inactive otherwise");
    split_before_usage(it, current_pos + 1, current_pos + 1);

  } else {
    // search the position where the interval must have a register and split
    // at the optimal position before.
    // The newly created part is added to the unhandled list and will get a register
    // when it is activated
    int min_split_pos = current_pos + 1;
    int max_split_pos = MIN2(it->next_usage(mustHaveRegister, min_split_pos), it->to());

    split_before_usage(it, min_split_pos, max_split_pos);

    assert(it->next_usage(mustHaveRegister, current_pos) == max_jint, "the remaining part is spilled to stack and therefore has no register");
    split_for_spilling(it);
  }
}

int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
  int min_full_reg = any_reg;
  int max_partial_reg = any_reg;

  for (int i = _first_reg; i <= _last_reg; i++) {
    if (i == ignore_reg) {
      // this register must be ignored

    } else if (_use_pos[i] >= interval_to) {
      // this register is free for the full interval
      if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
        min_full_reg = i;
      }
    } else if (_use_pos[i] > reg_needed_until) {
      // this register is at least free until reg_needed_until
      if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
        max_partial_reg = i;
      }
    }
  }

  if (min_full_reg != any_reg) {
    return min_full_reg;
  } else if (max_partial_reg != any_reg) {
    *need_split = true;
    return max_partial_reg;
  } else {
    return any_reg;
  }
}

int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
  assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");

  int min_full_reg = any_reg;
  int max_partial_reg = any_reg;

  for (int i = _first_reg; i < _last_reg; i+=2) {
    if (_use_pos[i] >= interval_to && _use_pos[i + 1] >= interval_to) {
      // this register is free for the full interval
      if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
        min_full_reg = i;
      }
    } else if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
      // this register is at least free until reg_needed_until
      if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
        max_partial_reg = i;
      }
    }
  }

  if (min_full_reg != any_reg) {
    return min_full_reg;
  } else if (max_partial_reg != any_reg) {
    *need_split = true;
    return max_partial_reg;
  } else {
    return any_reg;
  }
}

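// Illustrative example for the two find_free_* functions: for an interval
// covering [10, 40) with reg_needed_until == 11 and
// _use_pos = { r0: 8, r1: 25, r2: 50 }, r0 is skipped (not free long enough),
// r2 is free for the whole interval and is returned as min_full_reg, and no
// split is needed. If r2 were occupied as well, r1 (free until 25) would be
// returned as max_partial_reg with *need_split set, and the caller would split
// the interval before position 25.
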
bool LinearScanWalker::alloc_free_reg(Interval* cur) {
  TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print());

  init_use_lists(true);
  free_exclude_active_fixed();
  free_exclude_active_any();
  free_collect_inactive_fixed(cur);
  free_collect_inactive_any(cur);
//  free_collect_unhandled(fixedKind, cur);
  assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");

  // _use_pos contains the start of the next interval that has this register assigned
  // (either as a fixed register or a normal allocated register in the past)
  // only intervals overlapping with cur are processed, non-overlapping intervals can be ignored safely
  TRACE_LINEAR_SCAN(4, tty->print_cr("      state of registers:"));
  TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr("      reg %d: use_pos: %d", i, _use_pos[i]));

  int hint_reg, hint_regHi;
  Interval* register_hint = cur->register_hint();
  if (register_hint != NULL) {
    hint_reg = register_hint->assigned_reg();
    hint_regHi = register_hint->assigned_regHi();

    if (allocator()->is_precolored_cpu_interval(register_hint)) {
      assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals");
      hint_regHi = hint_reg + 1;  // connect e.g. eax-edx
    }
    TRACE_LINEAR_SCAN(4, tty->print("      hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print());

  } else {
    hint_reg = any_reg;
    hint_regHi = any_reg;
  }
  assert(hint_reg == any_reg || hint_reg != hint_regHi, "hint reg and regHi equal");
  assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned to interval");

  // the register must be free at least until this position
  int reg_needed_until = cur->from() + 1;
  int interval_to = cur->to();

  bool need_split = false;
  int split_pos = -1;
  int reg = any_reg;
  int regHi = any_reg;

  if (_adjacent_regs) {
    reg = find_free_double_reg(reg_needed_until, interval_to, hint_reg, &need_split);
    regHi = reg + 1;
    if (reg == any_reg) {
      return false;
    }
    split_pos = MIN2(_use_pos[reg], _use_pos[regHi]);

  } else {
    reg = find_free_reg(reg_needed_until, interval_to, hint_reg, any_reg, &need_split);
    if (reg == any_reg) {
      return false;
    }
    split_pos = _use_pos[reg];

    if (_num_phys_regs == 2) {
      regHi = find_free_reg(reg_needed_until, interval_to, hint_regHi, reg, &need_split);

      if (_use_pos[reg] < interval_to && regHi == any_reg) {
        // do not split interval if only one register can be assigned until the split pos
        // (when one register is found for the whole interval, split&spill is only
        // performed for the hi register)
        return false;

      } else if (regHi != any_reg) {
        split_pos = MIN2(split_pos, _use_pos[regHi]);

        // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
        if (reg > regHi) {
          int temp = reg;
          reg = regHi;
          regHi = temp;
        }
      }
    }
  }

  cur->assign_reg(reg, regHi);
  TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi));

  assert(split_pos > 0, "invalid split_pos");
  if (need_split) {
    // register not available for full interval, so split it
    split_when_partial_register_available(cur, split_pos);
  }

  // only return true if interval is completely assigned
  return _num_phys_regs == 1 || regHi != any_reg;
}

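// Design note: the register hint consulted above originates from a move whose
// source and destination intervals do not interfere. Preferring the hinted
// register makes both intervals share a location, so the connecting move
// becomes superfluous and can be omitted in the final code.
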
int LinearScanWalker::find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
  int max_reg = any_reg;

  for (int i = _first_reg; i <= _last_reg; i++) {
    if (i == ignore_reg) {
      // this register must be ignored

    } else if (_use_pos[i] > reg_needed_until) {
      if (max_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_reg] && max_reg != hint_reg)) {
        max_reg = i;
      }
    }
  }

  if (max_reg != any_reg && _block_pos[max_reg] <= interval_to) {
    *need_split = true;
  }

  return max_reg;
}

int LinearScanWalker::find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
  assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");

  int max_reg = any_reg;

  for (int i = _first_reg; i < _last_reg; i+=2) {
    if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
      if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
        max_reg = i;
      }
    }
  }

  // guard against max_reg == any_reg (no candidate found), mirroring the
  // check in find_locked_reg above; otherwise _block_pos would be indexed
  // with a negative value
  if (max_reg != any_reg && (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to)) {
    *need_split = true;
  }

  return max_reg;
}

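// For these locked variants, _use_pos[i] holds the next real use position of
// the intervals currently occupying register i (an estimate of how costly it
// is to spill them), while _block_pos[i] marks where a fixed interval blocks
// the register outright. A candidate whose _block_pos lies before the end of
// the requested interval can only be granted up to that point, which is why
// need_split is set in that case.
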
void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
  assert(reg != any_reg, "no register assigned");

  for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
    Interval* it = _spill_intervals[reg]->at(i);
    remove_from_list(it);
    split_and_spill_interval(it);
  }

  if (regHi != any_reg) {
    IntervalList* processed = _spill_intervals[reg];
    for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
      Interval* it = _spill_intervals[regHi]->at(i);
      if (processed->index_of(it) == -1) {
        remove_from_list(it);
        split_and_spill_interval(it);
      }
    }
  }
}

// Split an Interval and spill it to memory so that cur can be placed in a register
void LinearScanWalker::alloc_locked_reg(Interval* cur) {
  TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());

  // collect current usage of registers
  init_use_lists(false);
  spill_exclude_active_fixed();
//  spill_block_unhandled_fixed(cur);
  assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
  spill_block_inactive_fixed(cur);
  spill_collect_active_any();
  spill_collect_inactive_any(cur);

#ifndef PRODUCT
  if (TraceLinearScanLevel >= 4) {
    tty->print_cr("      state of registers:");
    for (int i = _first_reg; i <= _last_reg; i++) {
      tty->print("      reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
      for (int j = 0; j < _spill_intervals[i]->length(); j++) {
        tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
      }
      tty->cr();
    }
  }
#endif

  // the register must be free at least until this position
  int reg_needed_until = MIN2(cur->first_usage(mustHaveRegister), cur->from() + 1);
  int interval_to = cur->to();
  assert(reg_needed_until > 0 && reg_needed_until < max_jint, "interval has no use");

  int split_pos = 0;
  int use_pos = 0;
  bool need_split = false;
  int reg, regHi;

  if (_adjacent_regs) {
    reg = find_locked_double_reg(reg_needed_until, interval_to, any_reg, &need_split);
    regHi = reg + 1;

    if (reg != any_reg) {
      use_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
      split_pos = MIN2(_block_pos[reg], _block_pos[regHi]);
    }
  } else {
    reg = find_locked_reg(reg_needed_until, interval_to, any_reg, cur->assigned_reg(), &need_split);
    regHi = any_reg;

    if (reg != any_reg) {
      use_pos = _use_pos[reg];
      split_pos = _block_pos[reg];

      if (_num_phys_regs == 2) {
        if (cur->assigned_reg() != any_reg) {
          regHi = reg;
          reg = cur->assigned_reg();
        } else {
          regHi = find_locked_reg(reg_needed_until, interval_to, any_reg, reg, &need_split);
          if (regHi != any_reg) {
            use_pos = MIN2(use_pos, _use_pos[regHi]);
            split_pos = MIN2(split_pos, _block_pos[regHi]);
          }
        }

        if (regHi != any_reg && reg > regHi) {
          // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
          int temp = reg;
          reg = regHi;
          regHi = temp;
        }
      }
    }
  }

  if (reg == any_reg || (_num_phys_regs == 2 && regHi == any_reg) || use_pos <= cur->first_usage(mustHaveRegister)) {
    // the first use of cur is later than the spilling position -> spill cur
    TRACE_LINEAR_SCAN(4, tty->print_cr("able to spill current interval. first_usage(register): %d, use_pos: %d", cur->first_usage(mustHaveRegister), use_pos));

    if (cur->first_usage(mustHaveRegister) <= cur->from() + 1) {
      assert(false, "cannot spill interval that is used in first instruction (possible reason: no register found)");
      // assign a reasonable register and do a bailout in product mode to avoid errors
      allocator()->assign_spill_slot(cur);
      BAILOUT("LinearScan: no register found");
    }

    split_and_spill_interval(cur);
  } else {
    TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
    assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
    assert(split_pos > 0, "invalid split_pos");
    assert(need_split == false || split_pos > cur->from(), "splitting interval at from");

    cur->assign_reg(reg, regHi);
    if (need_split) {
      // register not available for full interval, so split it
      split_when_partial_register_available(cur, split_pos);
    }

    // perform splitting and spilling for all affected intervals
    split_and_spill_intersecting_intervals(reg, regHi);
  }
}

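// Decision rule used by alloc_locked_reg: the chosen register is the one whose
// current owners have the most distant next use (use_pos). If even that use_pos
// is not later than cur's own first mustHaveRegister use, evicting the owners
// would not help, so cur itself is split and spilled instead.
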
bool LinearScanWalker::no_allocation_possible(Interval* cur) {
#ifdef X86
  // fast calculation of intervals that can never get a register because the
  // next instruction is a call that blocks all registers
  // Note: this does not work if callee-saved registers are available (e.g. on Sparc)

  // check if this interval is the result of a split operation
  // (an interval got a register until this position)
  int pos = cur->from();
  if ((pos & 1) == 1) {
    // the current instruction is a call that blocks all registers
    if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));

      // safety check that there is really no register available
      assert(alloc_free_reg(cur) == false, "found a register for this interval");
      return true;
    }

  }
#endif
  return false;
}

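// Note: an interval that starts at an odd op_id can only be the right-hand
// part of an earlier split, because definitions happen at even op_ids. The
// check above therefore recognizes the pattern "reload would end directly in
// front of a call that blocks all registers" without scanning any register
// state.
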
void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
  BasicType type = cur->type();
  _num_phys_regs = LinearScan::num_physical_regs(type);
  _adjacent_regs = LinearScan::requires_adjacent_regs(type);

  if (pd_init_regs_for_alloc(cur)) {
    // the appropriate register range was selected.
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    _first_reg = pd_first_fpu_reg;
    _last_reg = pd_last_fpu_reg;
  } else {
    _first_reg = pd_first_cpu_reg;
    _last_reg = pd_last_cpu_reg;
  }

  assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
  assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
}

bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
  if (op->code() != lir_move) {
    return false;
  }
  assert(op->as_Op1() != NULL, "move must be LIR_Op1");

  LIR_Opr in = ((LIR_Op1*)op)->in_opr();
  LIR_Opr res = ((LIR_Op1*)op)->result_opr();
  return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
}

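// Example: is_move(op, from, to) accepts only a lir_move whose input operand is
// the virtual register of 'from' and whose result operand is the virtual
// register of 'to'; moves involving physical registers or stack slots are
// rejected because both operands must be virtual. combine_spilled_intervals
// below uses this to detect intervals that are connected by a pair of moves.
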
// optimization (especially for phi functions of nested loops):
// assign same spill slot to non-intersecting intervals
void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
  if (cur->is_split_child()) {
    // optimization is only suitable for split parents
    return;
  }

  Interval* register_hint = cur->register_hint(false);
  if (register_hint == NULL) {
    // cur is not the target of a move, otherwise register_hint would be set
    return;
  }
  assert(register_hint->is_split_parent(), "register hint must be split parent");

  if (cur->spill_state() != noOptimization || register_hint->spill_state() != noOptimization) {
    // combining the stack slots for intervals where spill move optimization is applied
    // is not beneficial and would cause problems
    return;
  }

  int begin_pos = cur->from();
  int end_pos = cur->to();
  if (end_pos > allocator()->max_lir_op_id() || (begin_pos & 1) != 0 || (end_pos & 1) != 0) {
    // safety check that lir_op_with_id is allowed
    return;
  }

  if (!is_move(allocator()->lir_op_with_id(begin_pos), register_hint, cur) || !is_move(allocator()->lir_op_with_id(end_pos), cur, register_hint)) {
    // cur and register_hint are not connected with two moves
    return;
  }

  Interval* begin_hint = register_hint->split_child_at_op_id(begin_pos, LIR_OpVisitState::inputMode);
  Interval* end_hint = register_hint->split_child_at_op_id(end_pos, LIR_OpVisitState::outputMode);
  if (begin_hint == end_hint || begin_hint->to() != begin_pos || end_hint->from() != end_pos) {
    // register_hint must be split, otherwise the re-writing of use positions does not work
    return;
  }

  assert(begin_hint->assigned_reg() != any_reg, "must have register assigned");
  assert(end_hint->assigned_reg() == any_reg, "must not have register assigned");
  assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
  assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at begin of interval because of move");

  if (begin_hint->assigned_reg() < LinearScan::nof_regs) {
    // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
    return;
  }
  assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");

  // modify intervals such that cur gets the same stack slot as register_hint
  // delete use positions to prevent the intervals from getting a register at beginning
  cur->set_canonical_spill_slot(register_hint->canonical_spill_slot());
  cur->remove_first_use_pos();
  end_hint->remove_first_use_pos();
}

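// Typical shape that combine_spilled_intervals targets (a phi function of a
// nested loop):
//
//   begin_pos:  move register_hint -> cur    // cur is defined as a copy
//   ...                                      // loop body, cur stays unused
//   end_pos:    move cur -> register_hint    // value flows back at the end
//
// When register_hint is spilled at begin_pos anyway, giving cur the same
// canonical spill slot and removing the first use positions keeps the value in
// one stack location for the whole range, so the two connecting moves become
// redundant.
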
// allocate a physical register or memory location to an interval
bool LinearScanWalker::activate_current() {
  Interval* cur = current();
  bool result = true;

  TRACE_LINEAR_SCAN(2, tty->print   ("+++++ activating interval "); cur->print());
  TRACE_LINEAR_SCAN(4, tty->print_cr("      split_parent: %d, insert_move_when_activated: %d", cur->split_parent()->reg_num(), cur->insert_move_when_activated()));

  if (cur->assigned_reg() >= LinearScan::nof_regs) {
    // activating an interval that has a stack slot assigned -> split it at first use position
    // used for method parameters
    TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has spill slot assigned (method parameter) -> split it before first use"));

    split_stack_interval(cur);
    result = false;

  } else if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::must_start_in_memory)) {
    // activating an interval that must start in a stack slot, but may get a register later
    // used for lir_roundfp: rounding is done by store to stack and reload later
    TRACE_LINEAR_SCAN(4, tty->print_cr("      interval must start in stack slot -> split it before first use"));
    assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned");

    allocator()->assign_spill_slot(cur);
    split_stack_interval(cur);
    result = false;

  } else if (cur->assigned_reg() == any_reg) {
    // interval has no assigned register -> normal allocation
    // (this is the normal case for most intervals)
    TRACE_LINEAR_SCAN(4, tty->print_cr("      normal allocation of register"));

    // assign same spill slot to non-intersecting intervals
    combine_spilled_intervals(cur);

    init_vars_for_alloc(cur);
    if (no_allocation_possible(cur) || !alloc_free_reg(cur)) {
      // no empty register available.
      // split and spill another interval so that this interval gets a register
      alloc_locked_reg(cur);
    }

    // spilled intervals need not be moved to the active list
    if (cur->assigned_reg() >= LinearScan::nof_regs) {
      result = false;
    }
  }

  // load spilled values that become active from stack slot to register
  if (cur->insert_move_when_activated()) {
    assert(cur->is_split_child(), "must be");
    assert(cur->current_split_child() != NULL, "must be");
    assert(cur->current_split_child()->reg_num() != cur->reg_num(), "cannot insert move between same interval");
    TRACE_LINEAR_SCAN(4, tty->print_cr("Inserting move from interval %d to %d because insert_move_when_activated is set", cur->current_split_child()->reg_num(), cur->reg_num()));

    insert_move(cur->from(), cur->current_split_child(), cur);
  }
  cur->make_current_split_child();

  return result; // true = interval is moved to active list
}

// Implementation of EdgeMoveOptimizer

EdgeMoveOptimizer::EdgeMoveOptimizer() :
  _edge_instructions(4),
  _edge_instructions_idx(4)
{
}

void EdgeMoveOptimizer::optimize(BlockList* code) {
  EdgeMoveOptimizer optimizer = EdgeMoveOptimizer();

  // ignore the first block in the list (index 0 is not processed)
  for (int i = code->length() - 1; i >= 1; i--) {
    BlockBegin* block = code->at(i);

    if (block->number_of_preds() > 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
      optimizer.optimize_moves_at_block_end(block);
    }
    if (block->number_of_sux() == 2) {
      optimizer.optimize_moves_at_block_begin(block);
    }
  }
}

// clear all internal data structures
void EdgeMoveOptimizer::init_instructions() {
  _edge_instructions.clear();
  _edge_instructions_idx.clear();
}

// append a lir-instruction-list and the index of the current operation in this list
void EdgeMoveOptimizer::append_instructions(LIR_OpList* instructions, int instructions_idx) {
  _edge_instructions.append(instructions);
  _edge_instructions_idx.append(instructions_idx);
}

// return the current operation of the given edge (predecessor or successor)
LIR_Op* EdgeMoveOptimizer::instruction_at(int edge) {
  LIR_OpList* instructions = _edge_instructions.at(edge);
  int idx = _edge_instructions_idx.at(edge);

  if (idx < instructions->length()) {
    return instructions->at(idx);
  } else {
    return NULL;
  }
}

// removes the current operation of the given edge (predecessor or successor)
void EdgeMoveOptimizer::remove_cur_instruction(int edge, bool decrement_index) {
  LIR_OpList* instructions = _edge_instructions.at(edge);
  int idx = _edge_instructions_idx.at(edge);
  instructions->remove_at(idx);

  if (decrement_index) {
    _edge_instructions_idx.at_put(edge, idx - 1);
  }
}

bool EdgeMoveOptimizer::operations_different(LIR_Op* op1, LIR_Op* op2) {
  if (op1 == NULL || op2 == NULL) {
    // at least one block is already empty -> no optimization possible
    return true;
  }

  if (op1->code() == lir_move && op2->code() == lir_move) {
    assert(op1->as_Op1() != NULL, "move must be LIR_Op1");
    assert(op2->as_Op1() != NULL, "move must be LIR_Op1");
    LIR_Op1* move1 = (LIR_Op1*)op1;
    LIR_Op1* move2 = (LIR_Op1*)op2;
    if (move1->info() == move2->info() && move1->in_opr() == move2->in_opr() && move1->result_opr() == move2->result_opr()) {
      // these moves are exactly equal and can be optimized
      return false;
    }

  } else if (op1->code() == lir_fxch && op2->code() == lir_fxch) {
    assert(op1->as_Op1() != NULL, "fxch must be LIR_Op1");
    assert(op2->as_Op1() != NULL, "fxch must be LIR_Op1");
    LIR_Op1* fxch1 = (LIR_Op1*)op1;
    LIR_Op1* fxch2 = (LIR_Op1*)op2;
    if (fxch1->in_opr()->as_jint() == fxch2->in_opr()->as_jint()) {
      // equal FPU stack operations can be optimized
      return false;
    }

  } else if (op1->code() == lir_fpop_raw && op2->code() == lir_fpop_raw) {
    // equal FPU stack operations can be optimized
    return false;
  }

  // no optimization possible
  return true;
}

void EdgeMoveOptimizer::optimize_moves_at_block_end(BlockBegin* block) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));

  if (block->is_predecessor(block)) {
    // currently we can't handle this correctly.
    return;
  }

  init_instructions();
  int num_preds = block->number_of_preds();
  assert(num_preds > 1, "do not call otherwise");
  assert(!block->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");

  // setup a list with the lir-instructions of all predecessors
  int i;
  for (i = 0; i < num_preds; i++) {
    BlockBegin* pred = block->pred_at(i);
    LIR_OpList* pred_instructions = pred->lir()->instructions_list();

    if (pred->number_of_sux() != 1) {
      // this can happen with switch-statements where multiple edges are between
      // the same blocks.
      return;
    }

    assert(pred->number_of_sux() == 1, "can handle only one successor");
    assert(pred->sux_at(0) == block, "invalid control flow");
    assert(pred_instructions->last()->code() == lir_branch, "block with successor must end with branch");
    assert(pred_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
    assert(pred_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");

    if (pred_instructions->last()->info() != NULL) {
      // cannot optimize instructions when debug info is needed
      return;
    }

    // ignore the unconditional branch at the end of the block
    append_instructions(pred_instructions, pred_instructions->length() - 2);
  }


  // process lir-instructions while all predecessors end with the same instruction
  while (true) {
    LIR_Op* op = instruction_at(0);
    for (i = 1; i < num_preds; i++) {
      if (operations_different(op, instruction_at(i))) {
        // these instructions are different and cannot be optimized ->
        // no further optimization possible
        return;
      }
    }

    TRACE_LINEAR_SCAN(4, tty->print("found instruction that is equal in all %d predecessors: ", num_preds); op->print());

    // insert the instruction at the beginning of the current block
    block->lir()->insert_before(1, op);

    // delete the instruction at the end of all predecessors
    for (i = 0; i < num_preds; i++) {
      remove_cur_instruction(i, true);
    }
  }
}

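// Illustrative example for optimize_moves_at_block_end: if all predecessors of
// B3 end with the same move,
//
//   B1: ...; move r1 -> r2; branch B3      B2: ...; move r1 -> r2; branch B3
//
// the identical move is deleted from both predecessors and inserted once at
// the beginning of B3 (directly after its label), shortening the code on
// every incoming edge.
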
void EdgeMoveOptimizer::optimize_moves_at_block_begin(BlockBegin* block) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at begin of block B%d", block->block_id()));

  init_instructions();
  int num_sux = block->number_of_sux();

  LIR_OpList* cur_instructions = block->lir()->instructions_list();

  assert(num_sux == 2, "method should not be called otherwise");
  assert(cur_instructions->last()->code() == lir_branch, "block with successor must end with branch");
  assert(cur_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
  assert(cur_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");

  if (cur_instructions->last()->info() != NULL) {
    // cannot optimize instructions when debug info is needed
    return;
  }

  LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
  if (branch->info() != NULL || (branch->code() != lir_branch && branch->code() != lir_cond_float_branch)) {
    // not a valid case for optimization
    // currently, only blocks that end with two branches (conditional branch followed
    // by unconditional branch) are optimized
    return;
  }

  // now it is guaranteed that the block ends with two branch instructions.
  // the instructions are inserted at the end of the block before these two branches
  int insert_idx = cur_instructions->length() - 2;

  int i;
#ifdef ASSERT
  for (i = insert_idx - 1; i >= 0; i--) {
    LIR_Op* op = cur_instructions->at(i);
    if ((op->code() == lir_branch || op->code() == lir_cond_float_branch) && ((LIR_OpBranch*)op)->block() != NULL) {
      assert(false, "block with two successors can have only two branch instructions");
    }
  }
#endif

  // setup a list with the lir-instructions of all successors
  for (i = 0; i < num_sux; i++) {
    BlockBegin* sux = block->sux_at(i);
    LIR_OpList* sux_instructions = sux->lir()->instructions_list();

    assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");

    if (sux->number_of_preds() != 1) {
      // this can happen with switch-statements where multiple edges are between
      // the same blocks.
      return;
    }
    assert(sux->pred_at(0) == block, "invalid control flow");
    assert(!sux->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");

    // ignore the label at the beginning of the block
    append_instructions(sux_instructions, 1);
  }

  // process lir-instructions while all successors begin with the same instruction
  while (true) {
    LIR_Op* op = instruction_at(0);
    for (i = 1; i < num_sux; i++) {
      if (operations_different(op, instruction_at(i))) {
        // these instructions are different and cannot be optimized ->
        // no further optimization possible
        return;
      }
    }

    TRACE_LINEAR_SCAN(4, tty->print("----- found instruction that is equal in all %d successors: ", num_sux); op->print());

    // insert instruction at end of current block
    block->lir()->insert_before(insert_idx, op);
    insert_idx++;

    // delete the instructions at the beginning of all successors
    for (i = 0; i < num_sux; i++) {
      remove_cur_instruction(i, false);
    }
  }
}

// Implementation of ControlFlowOptimizer

ControlFlowOptimizer::ControlFlowOptimizer() :
  _original_preds(4)
{
}

void ControlFlowOptimizer::optimize(BlockList* code) {
  ControlFlowOptimizer optimizer = ControlFlowOptimizer();

  // push the OSR entry block to the end so that we're not jumping over it.
  BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
  if (osr_entry) {
    int index = osr_entry->linear_scan_number();
    assert(code->at(index) == osr_entry, "wrong index");
    code->remove_at(index);
    code->append(osr_entry);
  }

  optimizer.reorder_short_loops(code);
  optimizer.delete_empty_blocks(code);
  optimizer.delete_unnecessary_jumps(code);
  optimizer.delete_jumps_to_return(code);
}

void ControlFlowOptimizer::reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx) {
  int i = header_idx + 1;
  int max_end = MIN2(header_idx + ShortLoopSize, code->length());
  while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
    i++;
  }

  if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
    int end_idx = i - 1;
    BlockBegin* end_block = code->at(end_idx);

    if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
      // short loop from header_idx to end_idx found -> reorder blocks such that
      // the header_block is the last block instead of the first block of the loop
      TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
                                         end_idx - header_idx + 1,
                                         header_block->block_id(), end_block->block_id()));

      for (int j = header_idx; j < end_idx; j++) {
        code->at_put(j, code->at(j + 1));
      }
      code->at_put(end_idx, header_block);

      // correct the flags so that any loop alignment occurs in the right place.
      assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
      code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
      code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
    }
  }
}

void ControlFlowOptimizer::reorder_short_loops(BlockList* code) {
  for (int i = code->length() - 1; i >= 0; i--) {
    BlockBegin* block = code->at(i);

    if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
      reorder_short_loop(code, block, i);
    }
  }

  DEBUG_ONLY(verify(code));
}

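// Illustrative example for reorder_short_loop: a short loop laid out as
//   B10 (header), B11, B12 (single successor: back to B10)
// is reordered to B11, B12, B10. The loop body then falls through into the
// header instead of jumping backward, and B11 becomes the only backward
// branch target, which the flag update above records for loop alignment.
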
// only blocks with exactly one successor can be deleted. Such blocks
// must always end with an unconditional branch to this successor
bool ControlFlowOptimizer::can_delete_block(BlockBegin* block) {
  if (block->number_of_sux() != 1 || block->number_of_exception_handlers() != 0 || block->is_entry_block()) {
    return false;
  }

  LIR_OpList* instructions = block->lir()->instructions_list();

  assert(instructions->length() >= 2, "block must have label and branch");
  assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
  assert(instructions->last()->as_OpBranch() != NULL, "last instruction must always be a branch");
  assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "branch must be unconditional");
  assert(instructions->last()->as_OpBranch()->block() == block->sux_at(0), "branch target must be the successor");

  // block must have exactly one successor

  if (instructions->length() == 2 && instructions->last()->info() == NULL) {
    return true;
  }
  return false;
}

// substitute branch targets in all branch-instructions of this block
void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegin* target_from, BlockBegin* target_to) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting empty block: substituting from B%d to B%d inside B%d", target_from->block_id(), target_to->block_id(), block->block_id()));

  LIR_OpList* instructions = block->lir()->instructions_list();

  assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
  for (int i = instructions->length() - 1; i >= 1; i--) {
    LIR_Op* op = instructions->at(i);

    if (op->code() == lir_branch || op->code() == lir_cond_float_branch) {
      assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
      LIR_OpBranch* branch = (LIR_OpBranch*)op;

      if (branch->block() == target_from) {
        branch->change_block(target_to);
      }
      if (branch->ublock() == target_from) {
        branch->change_ublock(target_to);
      }
    }
  }
}

void ControlFlowOptimizer::delete_empty_blocks(BlockList* code) {
  int old_pos = 0;
  int new_pos = 0;
  int num_blocks = code->length();

  while (old_pos < num_blocks) {
    BlockBegin* block = code->at(old_pos);

    if (can_delete_block(block)) {
      BlockBegin* new_target = block->sux_at(0);

      // propagate backward branch target flag for correct code alignment
      if (block->is_set(BlockBegin::backward_branch_target_flag)) {
        new_target->set(BlockBegin::backward_branch_target_flag);
      }

      // collect a list with all predecessors that contains each predecessor only once.
      // the predecessors of block are changed during the substitution, so a copy of the
      // predecessor list is necessary
      int j;
      _original_preds.clear();
      for (j = block->number_of_preds() - 1; j >= 0; j--) {
        BlockBegin* pred = block->pred_at(j);
        if (_original_preds.index_of(pred) == -1) {
          _original_preds.append(pred);
        }
      }

      for (j = _original_preds.length() - 1; j >= 0; j--) {
        BlockBegin* pred = _original_preds.at(j);
        substitute_branch_target(pred, block, new_target);
        pred->substitute_sux(block, new_target);
      }
    } else {
      // adjust position of this block in the block list if blocks before
      // have been deleted
      if (new_pos != old_pos) {
        code->at_put(new_pos, code->at(old_pos));
      }
      new_pos++;
    }
    old_pos++;
  }
  code->truncate(new_pos);

  DEBUG_ONLY(verify(code));
}

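// Illustrative example for delete_empty_blocks: a block B5 consisting only of
// [label, branch B9] is removed by retargeting every branch in every
// predecessor from B5 to B9 and substituting the successor edge, so chains of
// empty jump blocks collapse into direct branches.
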
void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
  // skip the last block because a branch is always necessary there
  for (int i = code->length() - 2; i >= 0; i--) {
    BlockBegin* block = code->at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();

    LIR_Op* last_op = instructions->last();
    if (last_op->code() == lir_branch) {
      assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
      LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;

      assert(last_branch->block() != NULL, "last branch must always have a block as target");
      assert(last_branch->label() == last_branch->block()->label(), "must be equal");

      if (last_branch->info() == NULL) {
        if (last_branch->block() == code->at(i + 1)) {

          TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));

          // delete last branch instruction
          instructions->truncate(instructions->length() - 1);

        } else {
          LIR_Op* prev_op = instructions->at(instructions->length() - 2);
          if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
            assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
            LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;

            if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {

              TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));

              // eliminate a conditional branch to the immediate successor
              prev_branch->change_block(last_branch->block());
              prev_branch->negate_cond();
              instructions->truncate(instructions->length() - 1);
            }
          }
        }
      }
    }
  }

  DEBUG_ONLY(verify(code));
}

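// Illustrative examples for delete_unnecessary_jumps:
//   1) B4 ends with "branch B5" and B5 is the next block in code order
//      -> the branch is deleted and B4 falls through.
//   2) B4 ends with "branch_if cond, B5; branch B7" and B5 is the next block
//      -> rewritten to the single branch "branch_if !cond, B7" with
//         fall-through to B5.
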
void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
#ifdef ASSERT
  BitMap return_converted(BlockBegin::number_of_blocks());
  return_converted.clear();
#endif

  for (int i = code->length() - 1; i >= 0; i--) {
    BlockBegin* block = code->at(i);
    LIR_OpList* cur_instructions = block->lir()->instructions_list();
    LIR_Op* cur_last_op = cur_instructions->last();

    assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
    if (cur_instructions->length() == 2 && cur_last_op->code() == lir_return) {
      // the block contains only a label and a return
      // if a predecessor ends with an unconditional jump to this block, then the jump
      // can be replaced with a return instruction
      //
      // Note: the original block with only a return statement cannot be deleted completely
      //       because the predecessors might have other (conditional) jumps to this block
      //       -> this may lead to unnecessary return instructions in the final code

      assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
      assert(block->number_of_sux() == 0 ||
             (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
             "blocks that end with return must not have successors");

      assert(cur_last_op->as_Op1() != NULL, "return must be LIR_Op1");
      LIR_Opr return_opr = ((LIR_Op1*)cur_last_op)->in_opr();

      for (int j = block->number_of_preds() - 1; j >= 0; j--) {
        BlockBegin* pred = block->pred_at(j);
        LIR_OpList* pred_instructions = pred->lir()->instructions_list();
        LIR_Op* pred_last_op = pred_instructions->last();

        if (pred_last_op->code() == lir_branch) {
          assert(pred_last_op->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
          LIR_OpBranch* pred_last_branch = (LIR_OpBranch*)pred_last_op;

          if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
            // replace the jump to a return with a direct return
            // Note: currently the edge between the blocks is not deleted
            pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
#ifdef ASSERT
            return_converted.set_bit(pred->block_id());
#endif
          }
        }
      }
    }
  }
}

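// Illustrative example for delete_jumps_to_return: if B8 contains only
// [label, return r0] and a predecessor ends with an unconditional "branch B8"
// without debug info, that branch is replaced in place by "return r0",
// trading one jump at runtime for a duplicated return instruction.
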
6193 |
#ifdef ASSERT |
|
6194 |
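// check that all branch targets, successors and predecessors of the blocks
// in the code-emit order are themselves contained in that order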
void ControlFlowOptimizer::verify(BlockList* code) {
  for (int i = 0; i < code->length(); i++) {
    BlockBegin* block = code->at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();

    int j;
    for (j = 0; j < instructions->length(); j++) {
      LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();

      if (op_branch != NULL) {
        assert(op_branch->block() == NULL || code->index_of(op_branch->block()) != -1, "branch target not valid");
        assert(op_branch->ublock() == NULL || code->index_of(op_branch->ublock()) != -1, "branch target not valid");
      }
    }

    for (j = 0; j < block->number_of_sux(); j++) {
      BlockBegin* sux = block->sux_at(j);
      assert(code->index_of(sux) != -1, "successor not valid");
    }

    for (j = 0; j < block->number_of_preds(); j++) {
      BlockBegin* pred = block->pred_at(j);
      assert(code->index_of(pred) != -1, "predecessor not valid");
    }
  }
}
#endif


#ifndef PRODUCT

// Implementation of LinearScanStatistic

const char* LinearScanStatistic::counter_name(int counter_idx) {
  switch (counter_idx) {
    case counter_method:          return "compiled methods";
    case counter_fpu_method:      return "methods using fpu";
    case counter_loop_method:     return "methods with loops";
    case counter_exception_method:return "methods with xhandler";

    case counter_loop:            return "loops";
    case counter_block:           return "blocks";
    case counter_loop_block:      return "blocks inside loop";
    case counter_exception_block: return "exception handler entries";
    case counter_interval:        return "intervals";
    case counter_fixed_interval:  return "fixed intervals";
    case counter_range:           return "ranges";
    case counter_fixed_range:     return "fixed ranges";
    case counter_use_pos:         return "use positions";
    case counter_fixed_use_pos:   return "fixed use positions";
    case counter_spill_slots:     return "spill slots";

    // counter for classes of lir instructions
    case counter_instruction:     return "total instructions";
    case counter_label:           return "labels";
    case counter_entry:           return "method entries";
    case counter_return:          return "method returns";
    case counter_call:            return "method calls";
    case counter_move:            return "moves";
    case counter_cmp:             return "compare";
    case counter_cond_branch:     return "conditional branches";
    case counter_uncond_branch:   return "unconditional branches";
    case counter_stub_branch:     return "branches to stub";
    case counter_alu:             return "arithmetic + logic";
    case counter_alloc:           return "allocations";
    case counter_sync:            return "synchronisation";
    case counter_throw:           return "throw";
    case counter_unwind:          return "unwind";
    case counter_typecheck:       return "type+null-checks";
    case counter_fpu_stack:       return "fpu-stack";
    case counter_misc_inst:       return "other instructions";
    case counter_other_inst:      return "misc. instructions";

    // counter for different types of moves
    case counter_move_total:      return "total moves";
    case counter_move_reg_reg:    return "register->register";
    case counter_move_reg_stack:  return "register->stack";
    case counter_move_stack_reg:  return "stack->register";
    case counter_move_stack_stack:return "stack->stack";
    case counter_move_reg_mem:    return "register->memory";
    case counter_move_mem_reg:    return "memory->register";
    case counter_move_const_any:  return "constant->any";

    case blank_line_1:            return "";
    case blank_line_2:            return "";

    default: ShouldNotReachHere(); return "";
  }
}
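// maps a detail counter to the total it is a fraction of; print() uses this
// to report each detail counter as a percentage of its base counter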
LinearScanStatistic::Counter LinearScanStatistic::base_counter(int counter_idx) {
  if (counter_idx == counter_fpu_method || counter_idx == counter_loop_method || counter_idx == counter_exception_method) {
    return counter_method;
  } else if (counter_idx == counter_loop_block || counter_idx == counter_exception_block) {
    return counter_block;
  } else if (counter_idx >= counter_instruction && counter_idx <= counter_other_inst) {
    return counter_instruction;
  } else if (counter_idx >= counter_move_total && counter_idx <= counter_move_const_any) {
    return counter_move_total;
  }
  return invalid_counter;
}

LinearScanStatistic::LinearScanStatistic() {
  for (int i = 0; i < number_of_counters; i++) {
    _counters_sum[i] = 0;
    _counters_max[i] = -1;
  }
}

// add the method-local numbers to the total sum
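// and remember, per counter, the maximum value seen in any single method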
void LinearScanStatistic::sum_up(LinearScanStatistic &method_statistic) {
  for (int i = 0; i < number_of_counters; i++) {
    _counters_sum[i] += method_statistic._counters_sum[i];
    _counters_max[i] = MAX2(_counters_max[i], method_statistic._counters_sum[i]);
  }
}

void LinearScanStatistic::print(const char* title) {
  if (CountLinearScan || TraceLinearScanLevel > 0) {
    tty->cr();
    tty->print_cr("***** LinearScan statistic - %s *****", title);

    for (int i = 0; i < number_of_counters; i++) {
      if (_counters_sum[i] > 0 || _counters_max[i] >= 0) {
        tty->print("%25s: %8d", counter_name(i), _counters_sum[i]);

        if (base_counter(i) != invalid_counter) {
          tty->print(" (%5.1f%%) ", _counters_sum[i] * 100.0 / _counters_sum[base_counter(i)]);
        } else {
          tty->print("          ");  // same width as the percentage column
        }

        if (_counters_max[i] >= 0) {
          tty->print("%8d", _counters_max[i]);
        }
      }
      tty->cr();
    }
  }
}

void LinearScanStatistic::collect(LinearScan* allocator) {
  inc_counter(counter_method);
  if (allocator->has_fpu_registers()) {
    inc_counter(counter_fpu_method);
  }
  if (allocator->num_loops() > 0) {
    inc_counter(counter_loop_method);
  }
  inc_counter(counter_loop, allocator->num_loops());
  inc_counter(counter_spill_slots, allocator->max_spills());

  int i;
  for (i = 0; i < allocator->interval_count(); i++) {
    Interval* cur = allocator->interval_at(i);

    if (cur != NULL) {
      inc_counter(counter_interval);
      inc_counter(counter_use_pos, cur->num_use_positions());
      if (LinearScan::is_precolored_interval(cur)) {
        inc_counter(counter_fixed_interval);
        inc_counter(counter_fixed_use_pos, cur->num_use_positions());
      }

      Range* range = cur->first();
      while (range != Range::end()) {
        inc_counter(counter_range);
        if (LinearScan::is_precolored_interval(cur)) {
          inc_counter(counter_fixed_range);
        }
        range = range->next();
      }
    }
  }

  bool has_xhandlers = false;
  // Note: only count blocks that are in code-emit order
  for (i = 0; i < allocator->ir()->code()->length(); i++) {
    BlockBegin* cur = allocator->ir()->code()->at(i);

    inc_counter(counter_block);
    if (cur->loop_depth() > 0) {
      inc_counter(counter_loop_block);
    }
    if (cur->is_set(BlockBegin::exception_entry_flag)) {
      inc_counter(counter_exception_block);
      has_xhandlers = true;
    }

    LIR_OpList* instructions = cur->lir()->instructions_list();
    for (int j = 0; j < instructions->length(); j++) {
      LIR_Op* op = instructions->at(j);

      inc_counter(counter_instruction);

      switch (op->code()) {
        case lir_label:           inc_counter(counter_label); break;
        case lir_std_entry:
        case lir_osr_entry:       inc_counter(counter_entry); break;
        case lir_return:          inc_counter(counter_return); break;

        case lir_rtcall:
        case lir_static_call:
        case lir_optvirtual_call:
        case lir_virtual_call:    inc_counter(counter_call); break;

        case lir_move: {
          inc_counter(counter_move);
          inc_counter(counter_move_total);

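          // classify the move by the kind of its source and destination operands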
          LIR_Opr in = op->as_Op1()->in_opr();
          LIR_Opr res = op->as_Op1()->result_opr();
          if (in->is_register()) {
            if (res->is_register()) {
              inc_counter(counter_move_reg_reg);
            } else if (res->is_stack()) {
              inc_counter(counter_move_reg_stack);
            } else if (res->is_address()) {
              inc_counter(counter_move_reg_mem);
            } else {
              ShouldNotReachHere();
            }
          } else if (in->is_stack()) {
            if (res->is_register()) {
              inc_counter(counter_move_stack_reg);
            } else {
              inc_counter(counter_move_stack_stack);
            }
          } else if (in->is_address()) {
            assert(res->is_register(), "must be");
            inc_counter(counter_move_mem_reg);
          } else if (in->is_constant()) {
            inc_counter(counter_move_const_any);
          } else {
            ShouldNotReachHere();
          }
          break;
        }

        case lir_cmp:             inc_counter(counter_cmp); break;

        case lir_branch:
        case lir_cond_float_branch: {
          LIR_OpBranch* branch = op->as_OpBranch();
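          // a branch without a block target jumps to a code stub instead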
          if (branch->block() == NULL) {
            inc_counter(counter_stub_branch);
          } else if (branch->cond() == lir_cond_always) {
            inc_counter(counter_uncond_branch);
          } else {
            inc_counter(counter_cond_branch);
          }
          break;
        }

        case lir_neg:
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_mul_strictfp:
        case lir_div:
        case lir_div_strictfp:
        case lir_rem:
        case lir_sqrt:
        case lir_sin:
        case lir_cos:
        case lir_abs:
        case lir_log10:
        case lir_log:
        case lir_logic_and:
        case lir_logic_or:
        case lir_logic_xor:
        case lir_shl:
        case lir_shr:
        case lir_ushr:            inc_counter(counter_alu); break;

        case lir_alloc_object:
        case lir_alloc_array:     inc_counter(counter_alloc); break;

        case lir_monaddr:
        case lir_lock:
        case lir_unlock:          inc_counter(counter_sync); break;

        case lir_throw:           inc_counter(counter_throw); break;

        case lir_unwind:          inc_counter(counter_unwind); break;

        case lir_null_check:
        case lir_leal:
        case lir_instanceof:
        case lir_checkcast:
        case lir_store_check:     inc_counter(counter_typecheck); break;

        case lir_fpop_raw:
        case lir_fxch:
        case lir_fld:             inc_counter(counter_fpu_stack); break;

        case lir_nop:
        case lir_push:
        case lir_pop:
        case lir_convert:
        case lir_roundfp:
        case lir_cmove:           inc_counter(counter_misc_inst); break;

        default:                  inc_counter(counter_other_inst); break;
      }
    }
  }

  if (has_xhandlers) {
    inc_counter(counter_exception_method);
  }
}

void LinearScanStatistic::compute(LinearScan* allocator, LinearScanStatistic &global_statistic) {
  if (CountLinearScan || TraceLinearScanLevel > 0) {
    LinearScanStatistic local_statistic = LinearScanStatistic();

    local_statistic.collect(allocator);
    global_statistic.sum_up(local_statistic);

    if (TraceLinearScanLevel > 2) {
      local_statistic.print("current local statistic");
    }
  }
}


// Implementation of LinearScanTimers

LinearScanTimers::LinearScanTimers() {
  for (int i = 0; i < number_of_timers; i++) {
    timer(i)->reset();
  }
}

const char* LinearScanTimers::timer_name(int idx) {
  switch (idx) {
    case timer_do_nothing:               return "Nothing (Time Check)";
    case timer_number_instructions:      return "Number Instructions";
    case timer_compute_local_live_sets:  return "Local Live Sets";
    case timer_compute_global_live_sets: return "Global Live Sets";
    case timer_build_intervals:          return "Build Intervals";
    case timer_sort_intervals_before:    return "Sort Intervals Before";
    case timer_allocate_registers:       return "Allocate Registers";
    case timer_resolve_data_flow:        return "Resolve Data Flow";
    case timer_sort_intervals_after:     return "Sort Intervals After";
    case timer_eliminate_spill_moves:    return "Spill optimization";
    case timer_assign_reg_num:           return "Assign Reg Num";
    case timer_allocate_fpu_stack:       return "Allocate FPU Stack";
    case timer_optimize_lir:             return "Optimize LIR";
    default: ShouldNotReachHere();       return "";
  }
}

void LinearScanTimers::begin_method() {
  if (TimeEachLinearScan) {
    // reset all timers to measure only current method
    for (int i = 0; i < number_of_timers; i++) {
      timer(i)->reset();
    }
  }
}

void LinearScanTimers::end_method(LinearScan* allocator) {
  if (TimeEachLinearScan) {
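    // correction value c: the time needed just to start and stop a timer,
    // as measured by the timer_do_nothing dummy timer; it is subtracted
    // from every other timer below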
    double c = timer(timer_do_nothing)->seconds();
    double total = 0;
    for (int i = 1; i < number_of_timers; i++) {
      total += timer(i)->seconds() - c;
    }

    if (total >= 0.0005) {
      // print all information in one line for automatic processing
      tty->print("@"); allocator->compilation()->method()->print_name();

      tty->print("@ %d ", allocator->compilation()->method()->code_size());
      tty->print("@ %d ", allocator->block_at(allocator->block_count() - 1)->last_lir_instruction_id() / 2);
      tty->print("@ %d ", allocator->block_count());
      tty->print("@ %d ", allocator->num_virtual_regs());
      tty->print("@ %d ", allocator->interval_count());
      tty->print("@ %d ", allocator->_num_calls);
      tty->print("@ %d ", allocator->num_loops());

      tty->print("@ %6.6f ", total);
      for (int i = 1; i < number_of_timers; i++) {
        tty->print("@ %4.1f ", ((timer(i)->seconds() - c) / total) * 100);
      }
      tty->cr();
    }
  }
}

void LinearScanTimers::print(double total_time) {
  if (TimeLinearScan) {
    // correction value: sum of dummy-timer that only measures the time that
    // is necessary to start and stop itself
    double c = timer(timer_do_nothing)->seconds();
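
    // each timer is printed twice: raw, as a percentage of total_time, and
    // corrected, with the start/stop overhead c subtracted and the
    // percentage taken relative to a total that is likewise reduced by the
    // estimated measurement overhead of all timers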
    for (int i = 0; i < number_of_timers; i++) {
      double t = timer(i)->seconds();
      tty->print_cr(" %25s: %6.3f s (%4.1f%%) corrected: %6.3f s (%4.1f%%)", timer_name(i), t, (t / total_time) * 100.0, t - c, (t - c) / (total_time - 2 * number_of_timers * c) * 100);
    }
  }
}

#endif // #ifndef PRODUCT