/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_LinearScan.hpp"
#include "c1/c1_ValueStack.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "utilities/bitMap.inline.hpp"

#ifndef PRODUCT

static LinearScanStatistic _stat_before_alloc;
static LinearScanStatistic _stat_after_asign;
static LinearScanStatistic _stat_final;

static LinearScanTimers _total_timer;

// helper macro for short definition of timer
#define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);

// helper macro for short definition of trace-output inside code
#define TRACE_LINEAR_SCAN(level, code)       \
  if (TraceLinearScanLevel >= level) {       \
    code;                                    \
  }

#else

#define TIME_LINEAR_SCAN(timer_name)
#define TRACE_LINEAR_SCAN(level, code)

#endif
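
// Editorial usage sketch (added commentary, not original code): a phase body
// below typically opens with
//   TIME_LINEAR_SCAN(timer_number_instructions);
//   TRACE_LINEAR_SCAN(3, tty->print_cr("***** running phase"));
// In PRODUCT builds both macros expand to nothing, so timing and tracing add
// no overhead there.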

// Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
#ifdef _LP64
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 1, -1};
#else
static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1};
#endif
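
// Reading the table (added note; assumes the standard HotSpot BasicType
// numbering where T_BOOLEAN == 4): T_LONG and T_DOUBLE take two 32-bit words
// on all targets, and with _LP64 an oop (T_OBJECT, T_ARRAY) also takes two
// words; -1 marks entries for which no spill size is defined.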


// Implementation of LinearScan

LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
 : _compilation(ir->compilation())
 , _ir(ir)
 , _gen(gen)
 , _frame_map(frame_map)
 , _num_virtual_regs(gen->max_virtual_register_number())
 , _has_fpu_registers(false)
 , _num_calls(-1)
 , _max_spills(0)
 , _unused_spill_slot(-1)
 , _intervals(0)   // initialized later with correct length
 , _new_intervals_from_allocation(new IntervalList())
 , _sorted_intervals(NULL)
 , _needs_full_resort(false)
 , _lir_ops(0)     // initialized later with correct length
 , _block_of_op(0) // initialized later with correct length
 , _has_info(0)
 , _has_call(0)
 , _scope_value_cache(0) // initialized later with correct length
 , _interval_in_loop(0)  // initialized later with correct length
 , _cached_blocks(*ir->linear_scan_order())
#ifdef X86
 , _fpu_stack_allocator(NULL)
#endif
{
  assert(this->ir() != NULL,          "check if valid");
  assert(this->compilation() != NULL, "check if valid");
  assert(this->gen() != NULL,         "check if valid");
  assert(this->frame_map() != NULL,   "check if valid");
}


// ********** functions for converting LIR-Operands to register numbers
//
// Emulate a flat register file comprising physical integer registers,
// physical floating-point registers and virtual registers, in that order.
// Virtual registers already have appropriate numbers, since V0 is
// the number of physical registers.
// Returns -1 for the hi word if opr is a single-word operand.
//
// Note: the inverse operation (calculating an operand for register numbers)
// is done in calc_operand_for_interval()

int LinearScan::reg_num(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    assert(opr->vreg_number() >= nof_regs, "found a virtual register with a fixed-register number");
    return opr->vreg_number();
  } else if (opr->is_single_cpu()) {
    return opr->cpu_regnr();
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrLo();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return opr->fpu_regnr() + pd_first_xmm_reg;
  } else if (opr->is_double_xmm()) {
    return opr->fpu_regnrLo() + pd_first_xmm_reg;
#endif
  } else if (opr->is_single_fpu()) {
    return opr->fpu_regnr() + pd_first_fpu_reg;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrLo() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}
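
// Added orientation note: the resulting flat number space is
//   [0, nof_cpu_regs)         physical CPU registers
//   [nof_cpu_regs, nof_regs)  physical FPU/XMM registers
//   [nof_regs, ...)           virtual registers (vreg_number() values)
// The classification helpers below test exactly these ranges.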

int LinearScan::reg_numHi(LIR_Opr opr) {
  assert(opr->is_register(), "should not call this otherwise");

  if (opr->is_virtual_register()) {
    return -1;
  } else if (opr->is_single_cpu()) {
    return -1;
  } else if (opr->is_double_cpu()) {
    return opr->cpu_regnrHi();
#ifdef X86
  } else if (opr->is_single_xmm()) {
    return -1;
  } else if (opr->is_double_xmm()) {
    return -1;
#endif
  } else if (opr->is_single_fpu()) {
    return -1;
  } else if (opr->is_double_fpu()) {
    return opr->fpu_regnrHi() + pd_first_fpu_reg;
  } else {
    ShouldNotReachHere();
    return -1;
  }
}


// ********** functions for classification of intervals

bool LinearScan::is_precolored_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_interval(const Interval* i) {
  return i->reg_num() >= LIR_OprDesc::vreg_base;
}

bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
  return i->reg_num() < LinearScan::nof_cpu_regs;
}

bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
#if defined(__SOFTFP__) || defined(E500V2)
  return i->reg_num() >= LIR_OprDesc::vreg_base;
#else
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
#endif // __SOFTFP__ or E500V2
}

bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
  return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
}

bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
#if defined(__SOFTFP__) || defined(E500V2)
  return false;
#else
  return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
#endif // __SOFTFP__ or E500V2
}

bool LinearScan::is_in_fpu_register(const Interval* i) {
  // fixed intervals not needed for FPU stack allocation
  return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
}

bool LinearScan::is_oop_interval(const Interval* i) {
  // fixed intervals never contain oops
  return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
}


// ********** General helper functions

// compute next unused stack index that can be used for spilling
int LinearScan::allocate_spill_slot(bool double_word) {
  int spill_slot;
  if (double_word) {
    if ((_max_spills & 1) == 1) {
      // alignment of double-word values
      // the hole left by the alignment is filled with the next single-word value
      assert(_unused_spill_slot == -1, "wasting a spill slot");
      _unused_spill_slot = _max_spills;
      _max_spills++;
    }
    spill_slot = _max_spills;
    _max_spills += 2;

  } else if (_unused_spill_slot != -1) {
    // re-use hole that was the result of a previous double-word alignment
    spill_slot = _unused_spill_slot;
    _unused_spill_slot = -1;

  } else {
    spill_slot = _max_spills;
    _max_spills++;
  }

  int result = spill_slot + LinearScan::nof_regs + frame_map()->argcount();

  // The class OopMapValue uses only 11 bits for storing the name of the
  // oop location. So a stack slot bigger than 2^11 leads to an overflow
  // that is not reported in product builds. Prevent this by checking the
  // spill slot here (although this value and the location name used later
  // are slightly different).
  if (result > 2000) {
    bailout("too many stack slots used");
  }

  return result;
}
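
// Worked example (added note, hypothetical values): with _max_spills == 3, a
// double-word request first records the odd slot 3 in _unused_spill_slot,
// then hands out the aligned pair 4/5 and leaves _max_spills == 6; the next
// single-word request fills the hole and returns slot 3 (plus the
// nof_regs/argcount bias applied above).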

void LinearScan::assign_spill_slot(Interval* it) {
  // assign the canonical spill slot of the parent (if a part of the interval
  // is already spilled) or allocate a new spill slot
  if (it->canonical_spill_slot() >= 0) {
    it->assign_reg(it->canonical_spill_slot());
  } else {
    int spill = allocate_spill_slot(type2spill_size[it->type()] == 2);
    it->set_canonical_spill_slot(spill);
    it->assign_reg(spill);
  }
}

void LinearScan::propagate_spill_slots() {
  if (!frame_map()->finalize_frame(max_spills())) {
    bailout("frame too large");
  }
}
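
// Added note: because split children inherit the parent's canonical spill
// slot, an interval's value occupies a single stack location no matter how
// often the interval is split and spilled again.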

// create a new interval with a predefined reg_num
// (only used for parent intervals that are created during the building phase)
Interval* LinearScan::create_interval(int reg_num) {
  assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");

  Interval* interval = new Interval(reg_num);
  _intervals.at_put(reg_num, interval);

  // assign register number for precolored intervals
  if (reg_num < LIR_OprDesc::vreg_base) {
    interval->assign_reg(reg_num);
  }
  return interval;
}

// assign a new reg_num to the interval and append it to the list of intervals
// (only used for child intervals that are created during register allocation)
void LinearScan::append_interval(Interval* it) {
  it->set_reg_num(_intervals.length());
  _intervals.append(it);
  _new_intervals_from_allocation->append(it);
}

// copy the vreg-flags if an interval is split
void LinearScan::copy_register_flags(Interval* from, Interval* to) {
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::byte_reg)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::byte_reg);
  }
  if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::callee_saved)) {
    gen()->set_vreg_flag(to->reg_num(), LIRGenerator::callee_saved);
  }

  // Note: do not copy the must_start_in_memory flag because it is not necessary for child
  // intervals (only the very beginning of the interval must be in memory)
}


// ********** spill move optimization
// eliminate moves from register to stack if stack slot is known to be correct

// called during building of intervals
void LinearScan::change_spill_definition_pos(Interval* interval, int def_pos) {
  assert(interval->is_split_parent(), "can only be called for split parents");

  switch (interval->spill_state()) {
    case noDefinitionFound:
      assert(interval->spill_definition_pos() == -1, "must not be set before");
      interval->set_spill_definition_pos(def_pos);
      interval->set_spill_state(oneDefinitionFound);
      break;

    case oneDefinitionFound:
      assert(def_pos <= interval->spill_definition_pos(), "positions are processed in reverse order when intervals are created");
      if (def_pos < interval->spill_definition_pos() - 2) {
        // second definition found, so no spill optimization possible for this interval
        interval->set_spill_state(noOptimization);
      } else {
        // two consecutive definitions (because of two-operand LIR form)
        assert(block_of_op_with_id(def_pos) == block_of_op_with_id(interval->spill_definition_pos()), "block must be equal");
      }
      break;

    case noOptimization:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}
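
// Illustrative case for the tolerance of 2 above (added note, op_ids
// hypothetical): in two-operand LIR form "v7 = v3 + v4" becomes the move
// "v7 = v3" at op_id n followed by "v7 = v7 + v4" at op_id n + 2, so the same
// vreg is defined at two consecutive positions and spill optimization is
// still possible.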

// called during register allocation
void LinearScan::change_spill_state(Interval* interval, int spill_pos) {
  switch (interval->spill_state()) {
    case oneDefinitionFound: {
      int def_loop_depth = block_of_op_with_id(interval->spill_definition_pos())->loop_depth();
      int spill_loop_depth = block_of_op_with_id(spill_pos)->loop_depth();

      if (def_loop_depth < spill_loop_depth) {
        // the loop depth of the spilling position is higher than the loop depth
        // at the definition of the interval -> move write to memory out of loop
        // by storing at the definition of the interval
        interval->set_spill_state(storeAtDefinition);
      } else {
        // the interval is currently spilled only once, so for now there is no
        // reason to store the interval at the definition
        interval->set_spill_state(oneMoveInserted);
      }
      break;
    }

    case oneMoveInserted: {
      // the interval is spilled more than once, so it is better to store it to
      // memory at the definition
      interval->set_spill_state(storeAtDefinition);
      break;
    }

    case storeAtDefinition:
    case startInMemory:
    case noOptimization:
    case noDefinitionFound:
      // nothing to do
      break;

    default:
      assert(false, "other states not allowed at this time");
  }
}


bool LinearScan::must_store_at_definition(const Interval* i) {
  return i->is_split_parent() && i->spill_state() == storeAtDefinition;
}

// called once before assignment of register numbers
void LinearScan::eliminate_spill_moves() {
  TIME_LINEAR_SCAN(timer_eliminate_spill_moves);
  TRACE_LINEAR_SCAN(3, tty->print_cr("***** Eliminating unnecessary spill moves"));

  // collect all intervals that must be stored after their definition.
  // the list is sorted by Interval::spill_definition_pos
  Interval* interval;
  Interval* temp_list;
  create_unhandled_lists(&interval, &temp_list, must_store_at_definition, NULL);

#ifdef ASSERT
  Interval* prev = NULL;
  Interval* temp = interval;
  while (temp != Interval::end()) {
    assert(temp->spill_definition_pos() > 0, "invalid spill definition pos");
    if (prev != NULL) {
      assert(temp->from() >= prev->from(), "intervals not sorted");
      assert(temp->spill_definition_pos() >= prev->spill_definition_pos(), "when intervals are sorted by from, then they must also be sorted by spill_definition_pos");
    }

    assert(temp->canonical_spill_slot() >= LinearScan::nof_regs, "interval has no spill slot assigned");
    assert(temp->spill_definition_pos() >= temp->from(), "invalid order");
    assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");

    TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));

    temp = temp->next();
  }
#endif

  LIR_InsertionBuffer insertion_buffer;
  int num_blocks = block_count();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();
    int         num_inst     = instructions->length();
    bool        has_new      = false;

    // iterate all instructions of the block. skip the first because it is always a label
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      if (op_id == -1) {
        // remove move from register to stack if the stack slot is guaranteed to be correct.
        // only moves that have been inserted by LinearScan can be removed.
        assert(op->code() == lir_move, "only moves can have a op_id of -1");
        assert(op->as_Op1() != NULL, "move must be LIR_Op1");
        assert(op->as_Op1()->result_opr()->is_virtual(), "LinearScan inserts only moves to virtual registers");

        LIR_Op1* op1 = (LIR_Op1*)op;
        Interval* interval = interval_at(op1->result_opr()->vreg_number());

        if (interval->assigned_reg() >= LinearScan::nof_regs && interval->always_in_memory()) {
          // move target is a stack slot that is always correct, so eliminate instruction
          TRACE_LINEAR_SCAN(4, tty->print_cr("eliminating move from interval %d to %d", op1->in_opr()->vreg_number(), op1->result_opr()->vreg_number()));
          instructions->at_put(j, NULL); // NULL-instructions are deleted by assign_reg_num
        }

      } else {
        // insert move from register to stack just after the beginning of the interval
        assert(interval == Interval::end() || interval->spill_definition_pos() >= op_id, "invalid order");
        assert(interval == Interval::end() || (interval->is_split_parent() && interval->spill_state() == storeAtDefinition), "invalid interval");

        while (interval != Interval::end() && interval->spill_definition_pos() == op_id) {
          if (!has_new) {
            // prepare insertion buffer (appended when all instructions of the block are processed)
            insertion_buffer.init(block->lir());
            has_new = true;
          }

          LIR_Opr from_opr = operand_for_interval(interval);
          LIR_Opr to_opr = canonical_spill_opr(interval);
          assert(from_opr->is_fixed_cpu() || from_opr->is_fixed_fpu(), "from operand must be a register");
          assert(to_opr->is_stack(), "to operand must be a stack slot");

          insertion_buffer.move(j, from_opr, to_opr);
          TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));

          interval = interval->next();
        }
      }
    } // end of instruction iteration

    if (has_new) {
      block->lir()->append(&insertion_buffer);
    }
  } // end of block iteration

  assert(interval == Interval::end(), "missed an interval");
}


// ********** Phase 1: number all instructions in all blocks
// Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.

void LinearScan::number_instructions() {
  {
    // dummy-timer to measure the cost of the timer itself
    // (this time is then subtracted from all other timers to get the real value)
    TIME_LINEAR_SCAN(timer_do_nothing);
  }
  TIME_LINEAR_SCAN(timer_number_instructions);

  // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
  int num_blocks = block_count();
  int num_instructions = 0;
  int i;
  for (i = 0; i < num_blocks; i++) {
    num_instructions += block_at(i)->lir()->instructions_list()->length();
  }

  // initialize with correct length
  _lir_ops = LIR_OpArray(num_instructions, num_instructions, NULL);
  _block_of_op = BlockBeginArray(num_instructions, num_instructions, NULL);

  int op_id = 0;
  int idx = 0;

  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    block->set_first_lir_instruction_id(op_id);
    LIR_OpList* instructions = block->lir()->instructions_list();

    int num_inst = instructions->length();
    for (int j = 0; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      op->set_id(op_id);

      _lir_ops.at_put(idx, op);
      _block_of_op.at_put(idx, block);
      assert(lir_op_with_id(op_id) == op, "must match");

      idx++;
      op_id += 2; // numbering of lir_ops by two
    }
    block->set_last_lir_instruction_id(op_id - 2);
  }
  assert(idx == num_instructions, "must match");
  assert(idx * 2 == op_id, "must match");

  _has_call.initialize(num_instructions);
  _has_info.initialize(num_instructions);
}
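
// A note on the numbering scheme (added commentary): op_ids advance in steps
// of two, so even positions denote the operations themselves while odd
// positions fall between two operations; interval ranges such as
// [def_pos, def_pos + 1) built during Phase 4 make use of these intermediate
// positions.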


// ********** Phase 2: compute local live sets separately for each block
// (sets live_gen and live_kill for each block)

void LinearScan::set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill) {
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  // check some assumptions about debug information
  assert(!value->type()->is_illegal(), "if this local is used by the interpreter it shouldn't be of indeterminate type");
  assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands");
  assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    int reg = opr->vreg_number();
    if (!live_kill.at(reg)) {
      live_gen.set_bit(reg);
      TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for value %c%d, LIR op_id %d, register number %d", value->type()->tchar(), value->id(), op->id(), reg));
    }
  }
}


void LinearScan::compute_local_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_local_live_sets);

  int  num_blocks = block_count();
  int  live_size = live_set_size();
  bool local_has_fpu_registers = false;
  int  local_num_calls = 0;
  LIR_OpVisitState visitor;

  BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());

  // iterate all blocks
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);

    ResourceBitMap live_gen(live_size);
    ResourceBitMap live_kill(live_size);

    if (block->is_set(BlockBegin::exception_entry_flag)) {
      // Phi functions at the beginning of an exception handler are
      // implicitly defined (= killed) at the beginning of the block.
      for_each_phi_fun(block, phi,
        live_kill.set_bit(phi->operand()->vreg_number())
      );
    }

    LIR_OpList* instructions = block->lir()->instructions_list();
    int num_inst = instructions->length();

    // iterate all instructions of the block. skip the first because it is always a label
    assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
    for (int j = 1; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);

      // visit operation to collect all operands
      visitor.visit(op);

      if (visitor.has_call()) {
        _has_call.set_bit(op->id() >> 1);
        local_num_calls++;
      }
      if (visitor.info_count() > 0) {
        _has_info.set_bit(op->id() >> 1);
      }

      // iterate input operands of instruction
      int k, n, reg;
      n = visitor.opr_count(LIR_OpVisitState::inputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          if (!live_kill.at(reg)) {
            live_gen.set_bit(reg);
            TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for register %d at instruction %d", reg, op->id()));
          }
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets.
        // this is checked by these assertions to be sure about it.
        // the entry block may have incoming values in registers, which is ok.
        if (!opr->is_virtual_register() && block != ir()->start()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            assert(live_kill.at(reg), "using fixed register that is not defined in this block");
          }
        }
#endif
      }

      // Add uses of live locals from interpreter's point of view for proper debug information generation
      n = visitor.info_count();
      for (k = 0; k < n; k++) {
        CodeEmitInfo* info = visitor.info_at(k);
        ValueStack* stack = info->stack();
        for_each_state_value(stack, value,
          set_live_gen_kill(value, op, live_gen, live_kill)
        );
      }

      // iterate temp operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::tempMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }

      // iterate output operands of instruction
      n = visitor.opr_count(LIR_OpVisitState::outputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");

        if (opr->is_virtual_register()) {
          assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
          reg = opr->vreg_number();
          live_kill.set_bit(reg);
          if (block->loop_index() >= 0) {
            local_interval_in_loop.set_bit(reg, block->loop_index());
          }
          local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
        }

#ifdef ASSERT
        // fixed intervals are never live at block boundaries, so
        // they need not be processed in live sets
        // process them only in debug mode so that this can be checked
        if (!opr->is_virtual_register()) {
          reg = reg_num(opr);
          if (is_processed_reg_num(reg)) {
            live_kill.set_bit(reg_num(opr));
          }
          reg = reg_numHi(opr);
          if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
            live_kill.set_bit(reg);
          }
        }
#endif
      }
    } // end of instruction iteration

    block->set_live_gen (live_gen);
    block->set_live_kill(live_kill);
    block->set_live_in  (ResourceBitMap(live_size));
    block->set_live_out (ResourceBitMap(live_size));

    TRACE_LINEAR_SCAN(4, tty->print("live_gen  B%d ", block->block_id()); print_bitmap(block->live_gen()));
    TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
  } // end of block iteration

  // propagate local calculated information into LinearScan object
  _has_fpu_registers = local_has_fpu_registers;
  compilation()->set_has_fpu_code(local_has_fpu_registers);

  _num_calls = local_num_calls;
  _interval_in_loop = local_interval_in_loop;
}
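
// Example (added, with illustrative vreg numbers): for a block whose only
// instruction is "v10 = v11 + v12", the inputs v11 and v12 are read before
// any local definition and therefore enter live_gen, while the output v10 is
// recorded in live_kill; a later use of v10 inside the same block would then
// not be added to live_gen.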


// ********** Phase 3: perform a backward dataflow analysis to compute global live sets
// (sets live_in and live_out for each block)

void LinearScan::compute_global_live_sets() {
  TIME_LINEAR_SCAN(timer_compute_global_live_sets);

  int  num_blocks = block_count();
  bool change_occurred;
  bool change_occurred_in_block;
  int  iteration_count = 0;
  ResourceBitMap live_out(live_set_size()); // scratch set for calculations

  // Perform a backward dataflow analysis to compute live_out and live_in for each block.
  // The loop is executed until a fixpoint is reached (no changes in an iteration).
  // Exception handlers must be processed because not all live values are
  // present in the state array, e.g. because of global value numbering.
  do {
    change_occurred = false;

    // iterate all blocks in reverse order
    for (int i = num_blocks - 1; i >= 0; i--) {
      BlockBegin* block = block_at(i);

      change_occurred_in_block = false;

      // live_out(block) is the union of live_in(sux), for successors sux of block
      int n = block->number_of_sux();
      int e = block->number_of_exception_handlers();
      if (n + e > 0) {
        // block has successors
        if (n > 0) {
          live_out.set_from(block->sux_at(0)->live_in());
          for (int j = 1; j < n; j++) {
            live_out.set_union(block->sux_at(j)->live_in());
          }
        } else {
          live_out.clear();
        }
        for (int j = 0; j < e; j++) {
          live_out.set_union(block->exception_handler_at(j)->live_in());
        }

        if (!block->live_out().is_same(live_out)) {
          // A change occurred. Swap the old and new live out sets to avoid copying.
          ResourceBitMap temp = block->live_out();
          block->set_live_out(live_out);
          live_out = temp;

          change_occurred = true;
          change_occurred_in_block = true;
        }
      }

      if (iteration_count == 0 || change_occurred_in_block) {
        // live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
        // note: live_in has to be computed only in first iteration or if live_out has changed!
        ResourceBitMap live_in = block->live_in();
        live_in.set_from(block->live_out());
        live_in.set_difference(block->live_kill());
        live_in.set_union(block->live_gen());
      }

#ifndef PRODUCT
      if (TraceLinearScanLevel >= 4) {
        char c = ' ';
        if (iteration_count == 0 || change_occurred_in_block) {
          c = '*';
        }
        tty->print("(%d) live_in%c  B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_in());
        tty->print("(%d) live_out%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_out());
      }
#endif
    }
    iteration_count++;

    if (change_occurred && iteration_count > 50) {
      BAILOUT("too many iterations in compute_global_live_sets");
    }
  } while (change_occurred);


#ifdef ASSERT
  // check that fixed intervals are not live at block boundaries
  // (live set must be empty at fixed intervals)
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    for (int j = 0; j < LIR_OprDesc::vreg_base; j++) {
      assert(block->live_in().at(j)  == false, "live_in  set of fixed register must be empty");
      assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
      assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
    }
  }
#endif

  // check that the live_in set of the first block is empty
  ResourceBitMap live_in_args(ir()->start()->live_in().size());
  if (!ir()->start()->live_in().is_same(live_in_args)) {
#ifdef ASSERT
    tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
    tty->print_cr("affected registers:");
    print_bitmap(ir()->start()->live_in());

    // print some additional information to simplify debugging
    for (unsigned int i = 0; i < ir()->start()->live_in().size(); i++) {
      if (ir()->start()->live_in().at(i)) {
        Instruction* instr = gen()->instruction_for_vreg(i);
        tty->print_cr("* vreg %d (HIR instruction %c%d)", i, instr == NULL ? ' ' : instr->type()->tchar(), instr == NULL ? 0 : instr->id());

        for (int j = 0; j < num_blocks; j++) {
          BlockBegin* block = block_at(j);
          if (block->live_gen().at(i)) {
            tty->print_cr("  used in block B%d", block->block_id());
          }
          if (block->live_kill().at(i)) {
            tty->print_cr("  defined in block B%d", block->block_id());
          }
        }
      }
    }

#endif
    // when this fails, virtual registers are used before they are defined.
    assert(false, "live_in set of first block must be empty");
    // bailout if this occurs in product mode.
    bailout("live_in set of first block not empty");
  }
}
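
// Restating the comments above as dataflow equations (added note):
//   live_out(B) = union of live_in(S) over all successors S of B
//   live_in(B)  = (live_out(B) - live_kill(B)) union live_gen(B)
// Visiting blocks in reverse order lets most information flow backwards in a
// single pass, so the fixpoint is normally reached long before the hard cap
// of 50 iterations.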


// ********** Phase 4: build intervals
// (fills the list _intervals)

void LinearScan::add_use(Value value, int from, int to, IntervalUseKind use_kind) {
  assert(!value->type()->is_illegal(), "if this value is used by the interpreter it shouldn't be of indeterminate type");
  LIR_Opr opr = value->operand();
  Constant* con = value->as_Constant();

  if ((con == NULL || con->is_pinned()) && opr->is_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr, from, to, use_kind);
  }
}


void LinearScan::add_def(LIR_Opr opr, int def_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" def "); opr->print(tty); tty->print_cr(" def_pos %d (%d)", def_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_def(opr->vreg_number(), def_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_def(reg, def_pos, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_use(LIR_Opr opr, int from, int to, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" use "); opr->print(tty); tty->print_cr(" from %d to %d (%d)", from, to, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_use(opr->vreg_number(), from, to, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_use(reg, from, to, use_kind, opr->type_register());
    }
  }
}

void LinearScan::add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind) {
  TRACE_LINEAR_SCAN(2, tty->print(" temp "); opr->print(tty); tty->print_cr(" temp_pos %d (%d)", temp_pos, use_kind));
  assert(opr->is_register(), "should not be called otherwise");

  if (opr->is_virtual_register()) {
    assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
    add_temp(opr->vreg_number(), temp_pos, use_kind, opr->type_register());

  } else {
    int reg = reg_num(opr);
    if (is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
    reg = reg_numHi(opr);
    if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
      add_temp(reg, temp_pos, use_kind, opr->type_register());
    }
  }
}


void LinearScan::add_def(int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval != NULL) {
    assert(interval->reg_num() == reg_num, "wrong interval");

    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    Range* r = interval->first();
    if (r->from() <= def_pos) {
      // Update the starting point (when a range is first created for a use, its
      // start is the beginning of the current block until a def is encountered.)
      r->set_from(def_pos);
      interval->add_use_pos(def_pos, use_kind);

    } else {
      // Dead value - make vacuous interval
      // also add use_kind for dead intervals
      interval->add_range(def_pos, def_pos + 1);
      interval->add_use_pos(def_pos, use_kind);
      TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
    }

  } else {
    // Dead value - make vacuous interval
    // also add use_kind for dead intervals
    interval = create_interval(reg_num);
    if (type != T_ILLEGAL) {
      interval->set_type(type);
    }

    interval->add_range(def_pos, def_pos + 1);
    interval->add_use_pos(def_pos, use_kind);
    TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
  }

  change_spill_definition_pos(interval, def_pos);
  if (use_kind == noUse && interval->spill_state() <= startInMemory) {
    // detection of method-parameters and roundfp-results
    // TODO: move this directly to position where use-kind is computed
    interval->set_spill_state(startInMemory);
  }
}

void LinearScan::add_use(int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(from, to);
  interval->add_use_pos(to, use_kind);
}

void LinearScan::add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type) {
  Interval* interval = interval_at(reg_num);
  if (interval == NULL) {
    interval = create_interval(reg_num);
  }
  assert(interval->reg_num() == reg_num, "wrong interval");

  if (type != T_ILLEGAL) {
    interval->set_type(type);
  }

  interval->add_range(temp_pos, temp_pos + 1);
  interval->add_use_pos(temp_pos, use_kind);
}
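
// Illustrative sequence (added note, hypothetical op_ids): operations are
// visited in reverse, so a use of v5 at op_id 20 first opens a pessimistic
// range from the block start to 20; when the definition at op_id 14 is
// reached later, add_def() shrinks the range to begin at 14. A def without
// any subsequent use instead gets the vacuous one-position range
// [def_pos, def_pos + 1).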


// The results of these functions are used for optimizing spilling and reloading:
// if these functions return shouldHaveRegister and the interval is spilled,
// it is not reloaded to a register.
IntervalUseKind LinearScan::use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Begin of an interval with must_start_in_memory set.
      // This interval will always get a stack slot first, so return noUse.
      return noUse;

    } else if (move->in_opr()->is_stack()) {
      // method argument (condition must be equal to handle_method_arguments)
      return noUse;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // input operand must have a register instead of output operand (leads to better register allocation)
        return shouldHaveRegister;
      }
    }
  }

  if (opr->is_virtual() &&
      gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::must_start_in_memory)) {
    // result is a stack-slot, so prevent immediate reloading
    return noUse;
  }

  // all other operands require a register
  return mustHaveRegister;
}
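
// Summary of the use kinds (added note): mustHaveRegister forces the value
// into a register at that position, shouldHaveRegister lets a spilled
// interval stay on the stack when reloading would not pay off, and noUse
// marks positions where the value may legitimately live in memory.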

IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;
    LIR_Opr res = move->result_opr();
    bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);

    if (result_in_memory) {
      // Move to an interval with must_start_in_memory set.
      // To avoid moves from stack to stack (not allowed) force the input operand to a register
      return mustHaveRegister;

    } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
      // Move from register to register
      if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
        // special handling of phi-function moves inside osr-entry blocks
        // input operand must have a register instead of output operand (leads to better register allocation)
        return mustHaveRegister;
      }

      // The input operand is not forced to a register (moves from stack to register are allowed),
      // but it is faster if the input operand is in a register
      return shouldHaveRegister;
    }
  }


#if defined(X86) || defined(S390)
  if (op->code() == lir_cmove) {
    // conditional moves can handle stack operands
    assert(op->result_opr()->is_register(), "result must always be in a register");
    return shouldHaveRegister;
  }

  // optimizations for second input operand of arithmetic operations on Intel
  // this operand is allowed to be on the stack in some cases
  BasicType opr_type = opr->type_register();
  if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
    if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2 S390_ONLY(|| true)) {
      // SSE float instruction (T_DOUBLE only supported with SSE2)
      switch (op->code()) {
        case lir_cmp:
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
        default:
          break;
      }
    } else {
      // FPU stack float instruction
      switch (op->code()) {
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_div:
        {
          assert(op->as_Op2() != NULL, "must be LIR_Op2");
          LIR_Op2* op2 = (LIR_Op2*)op;
          if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
            assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
            return shouldHaveRegister;
          }
        }
        default:
          break;
      }
    }
    // We want to sometimes use logical operations on pointers, in particular in GC barriers.
    // Since 64bit logical operations do not currently support operands on stack, we have to make
    // sure T_OBJECT doesn't get spilled along with T_LONG.
  } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) {
    // integer instruction (note: long operands must always be in register)
    switch (op->code()) {
      case lir_cmp:
      case lir_add:
      case lir_sub:
      case lir_logic_and:
      case lir_logic_or:
      case lir_logic_xor:
      {
        assert(op->as_Op2() != NULL, "must be LIR_Op2");
        LIR_Op2* op2 = (LIR_Op2*)op;
        if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
          assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
          return shouldHaveRegister;
        }
      }
      default:
        break;
    }
  }
#endif // X86 S390

  // all other operands require a register
  return mustHaveRegister;
}


void LinearScan::handle_method_arguments(LIR_Op* op) {
  // special handling for method arguments (moves from stack to virtual register):
  // the interval gets no register assigned, but the stack slot.
  // it is split before the first use by the register allocator.

  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->in_opr()->is_stack()) {
#ifdef ASSERT
      int arg_size = compilation()->method()->arg_size();
      LIR_Opr o = move->in_opr();
      if (o->is_single_stack()) {
        assert(o->single_stack_ix() >= 0 && o->single_stack_ix() < arg_size, "out of range");
      } else if (o->is_double_stack()) {
        assert(o->double_stack_ix() >= 0 && o->double_stack_ix() < arg_size, "out of range");
      } else {
        ShouldNotReachHere();
      }

      assert(move->id() > 0, "invalid id");
      assert(block_of_op_with_id(move->id())->number_of_preds() == 0, "move from stack must be in first block");
      assert(move->result_opr()->is_virtual(), "result of move must be a virtual register");

      TRACE_LINEAR_SCAN(4, tty->print_cr("found move from stack slot %d to vreg %d", o->is_single_stack() ? o->single_stack_ix() : o->double_stack_ix(), reg_num(move->result_opr())));
#endif

      Interval* interval = interval_at(reg_num(move->result_opr()));

      int stack_slot = LinearScan::nof_regs + (move->in_opr()->is_single_stack() ? move->in_opr()->single_stack_ix() : move->in_opr()->double_stack_ix());
      interval->set_canonical_spill_slot(stack_slot);
      interval->assign_reg(stack_slot);
    }
  }
}

void LinearScan::handle_doubleword_moves(LIR_Op* op) {
  // special handling for doubleword move from memory to register:
  // in this case the registers of the input address and the result
  // registers must not overlap -> add a temp range for the input registers
  if (op->code() == lir_move) {
    assert(op->as_Op1() != NULL, "must be LIR_Op1");
    LIR_Op1* move = (LIR_Op1*)op;

    if (move->result_opr()->is_double_cpu() && move->in_opr()->is_pointer()) {
      LIR_Address* address = move->in_opr()->as_address_ptr();
      if (address != NULL) {
        if (address->base()->is_valid()) {
          add_temp(address->base(), op->id(), noUse);
        }
        if (address->index()->is_valid()) {
          add_temp(address->index(), op->id(), noUse);
        }
      }
    }
  }
}

void LinearScan::add_register_hints(LIR_Op* op) {
  switch (op->code()) {
    case lir_move:      // fall through
    case lir_convert: {
      assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
      LIR_Op1* move = (LIR_Op1*)op;

      LIR_Opr move_from = move->in_opr();
      LIR_Opr move_to = move->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
    case lir_cmove: {
      assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
      LIR_Op2* cmove = (LIR_Op2*)op;

      LIR_Opr move_from = cmove->in_opr1();
      LIR_Opr move_to = cmove->result_opr();

      if (move_to->is_register() && move_from->is_register()) {
        Interval* from = interval_at(reg_num(move_from));
        Interval* to = interval_at(reg_num(move_to));
        if (from != NULL && to != NULL) {
          to->set_register_hint(from);
          TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
        }
      }
      break;
    }
    default:
      break;
  }
}
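
// A register hint is only a preference, not a constraint: when the walker
// picks a register for 'to' it first tries the one that 'from' ended up in,
// so that the connecting move becomes a register-to-itself transfer that can
// typically be removed when register numbers are assigned back to the LIR.
// Illustrative effect for "move v1 -> v2": if the hint can be honored, v1
// and v2 share a physical register and the move carries no work.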


void LinearScan::build_intervals() {
  TIME_LINEAR_SCAN(timer_build_intervals);

  // initialize interval list with expected number of intervals
  // (32 is added to have some space for split children without having to resize the list)
  _intervals = IntervalList(num_virtual_regs() + 32);
  // initialize all slots that are used by build_intervals
  _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);

  // create a list with all caller-save registers (cpu, fpu, xmm)
  // when an instruction is a call, a temp range is created for all these registers
  int num_caller_save_registers = 0;
  int caller_save_registers[LinearScan::nof_regs];

  int i;
  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
    LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
    assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
    assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
    caller_save_registers[num_caller_save_registers++] = reg_num(opr);
  }

  // temp ranges for fpu registers are only created when the method has
  // virtual fpu operands. Otherwise no allocation for fpu registers is
  // performed and so the temp ranges would be useless
  if (has_fpu_registers()) {
#ifdef X86
    if (UseSSE < 2) {
#endif
      for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
        LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
        assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
        assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
        caller_save_registers[num_caller_save_registers++] = reg_num(opr);
      }
#ifdef X86
    }
    if (UseSSE > 0) {
      int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
      for (i = 0; i < num_caller_save_xmm_regs; i++) {
        LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
        assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
        assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
        caller_save_registers[num_caller_save_registers++] = reg_num(opr);
      }
    }
#endif
  }
  assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");


  LIR_OpVisitState visitor;

  // iterate all blocks in reverse order
  for (i = block_count() - 1; i >= 0; i--) {
    BlockBegin* block = block_at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();
    int block_from = block->first_lir_instruction_id();
    int block_to = block->last_lir_instruction_id();

    assert(block_from == instructions->at(0)->id(), "must be");
    assert(block_to == instructions->at(instructions->length() - 1)->id(), "must be");

    // Update intervals for registers live at the end of this block;
    ResourceBitMap live = block->live_out();
    int size = (int)live.size();
    for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
      assert(live.at(number), "should not stop here otherwise");
      assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
      TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));

      add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);

      // add special use positions for loop-end blocks when the
      // interval is used anywhere inside this loop. It's possible
      // that the block was part of a non-natural loop, so it might
      // have an invalid loop index.
      if (block->is_set(BlockBegin::linear_scan_loop_end_flag) &&
          block->loop_index() != -1 &&
          is_interval_in_loop(number, block->loop_index())) {
        interval_at(number)->add_use_pos(block_to + 1, loopEndMarker);
      }
    }

    // iterate all instructions of the block in reverse order.
    // skip the first instruction because it is always a label
    // definitions of intervals are processed before uses
    assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
    for (int j = instructions->length() - 1; j >= 1; j--) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      // visit operation to collect all operands
      visitor.visit(op);

      // add a temp range for each register if operation destroys caller-save registers
      if (visitor.has_call()) {
        for (int k = 0; k < num_caller_save_registers; k++) {
          add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
        }
        TRACE_LINEAR_SCAN(4, tty->print_cr("operation destroys all caller-save registers"));
      }

      // Add any platform dependent temps
      pd_add_temps(op);

      // visit definitions (output and temp operands)
      int k, n;
      n = visitor.opr_count(LIR_OpVisitState::outputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_def(opr, op_id, use_kind_of_output_operand(op, opr));
      }

      n = visitor.opr_count(LIR_OpVisitState::tempMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_temp(opr, op_id, mustHaveRegister);
      }

      // visit uses (input operands)
      n = visitor.opr_count(LIR_OpVisitState::inputMode);
      for (k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
        assert(opr->is_register(), "visitor should only return register operands");
        add_use(opr, block_from, op_id, use_kind_of_input_operand(op, opr));
      }

      // Add uses of live locals from interpreter's point of view for proper
      // debug information generation
      // Treat these operands as temp values (if the life range is extended
      // to a call site, the value would be in a register at the call otherwise)
      n = visitor.info_count();
      for (k = 0; k < n; k++) {
        CodeEmitInfo* info = visitor.info_at(k);
        ValueStack* stack = info->stack();
        for_each_state_value(stack, value,
          add_use(value, block_from, op_id + 1, noUse);
        );
      }

      // special steps for some instructions (especially moves)
      handle_method_arguments(op);
      handle_doubleword_moves(op);
      add_register_hints(op);

    } // end of instruction iteration
  } // end of block iteration


  // add the range [0, 1[ to all fixed intervals
  // -> the register allocator need not handle unhandled fixed intervals
  for (int n = 0; n < LinearScan::nof_regs; n++) {
    Interval* interval = interval_at(n);
    if (interval != NULL) {
      interval->add_range(0, 1);
    }
  }
}
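
// Illustrative example of the ranges built above (op_ids assumed for the
// example; real ids are assigned in steps of two, odd positions being
// reserved for inserted moves): for a block covering op_ids [10, 20] where
// v5 is defined at 12 and used at 18, the use first creates the conservative
// range [10, 18] and the def then trims its start to 12. If the operation at
// 14 were a call, every caller-save register would additionally get a temp
// range [14, 15], keeping v5 out of those registers across the call.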


// ********** Phase 5: actual register allocation

int LinearScan::interval_cmp(Interval** a, Interval** b) {
  if (*a != NULL) {
    if (*b != NULL) {
      return (*a)->from() - (*b)->from();
    } else {
      return -1;
    }
  } else {
    if (*b != NULL) {
      return 1;
    } else {
      return 0;
    }
  }
}
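
// interval_cmp orders intervals by ascending start position and moves NULL
// entries to the back: for start positions {8, NULL, 2, 16} the sorted order
// is {2, 8, 16, NULL}. The NULL handling is defensive only; the sorted list
// produced below is expected to contain no NULLs (see is_sorted()).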

#ifndef PRODUCT
int interval_cmp(Interval* const& l, Interval* const& r) {
  return l->from() - r->from();
}

bool find_interval(Interval* interval, IntervalArray* intervals) {
  bool found;
  int idx = intervals->find_sorted<Interval*, interval_cmp>(interval, found);

  if (!found) {
    return false;
  }

  int from = interval->from();

  // The index we've found using binary search is pointing to an interval
  // that is defined in the same place as the interval we were looking for.
  // So now we have to look around that index and find the exact interval.
  for (int i = idx; i >= 0; i--) {
    if (intervals->at(i) == interval) {
      return true;
    }
    if (intervals->at(i)->from() != from) {
      break;
    }
  }

  for (int i = idx + 1; i < intervals->length(); i++) {
    if (intervals->at(i) == interval) {
      return true;
    }
    if (intervals->at(i)->from() != from) {
      break;
    }
  }

  return false;
}

bool LinearScan::is_sorted(IntervalArray* intervals) {
  int from = -1;
  int null_count = 0;

  for (int i = 0; i < intervals->length(); i++) {
    Interval* it = intervals->at(i);
    if (it != NULL) {
      assert(from <= it->from(), "Intervals are unordered");
      from = it->from();
    } else {
      null_count++;
    }
  }

  assert(null_count == 0, "Sorted intervals should not contain nulls");

  null_count = 0;

  for (int i = 0; i < interval_count(); i++) {
    Interval* interval = interval_at(i);
    if (interval != NULL) {
      assert(find_interval(interval, intervals), "Lists do not contain same intervals");
    } else {
      null_count++;
    }
  }

  assert(interval_count() - null_count == intervals->length(),
         "Sorted list should contain the same amount of non-NULL intervals as unsorted list");

  return true;
}
#endif
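
// Why find_interval() scans in both directions: find_sorted() is a binary
// search keyed on from(), so with several intervals starting at the same
// position it may land on any of them, e.g. in {A(2), B(4), C(4), D(4)} a
// lookup of D can return the index of B or C. The two loops walk outward
// until the start position changes, which bounds the extra work by the size
// of the equal-from() group.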

void LinearScan::add_to_list(Interval** first, Interval** prev, Interval* interval) {
  if (*prev != NULL) {
    (*prev)->set_next(interval);
  } else {
    *first = interval;
  }
  *prev = interval;
}

void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i)) {
  assert(is_sorted(_sorted_intervals), "interval list is not sorted");

  *list1 = *list2 = Interval::end();

  Interval* list1_prev = NULL;
  Interval* list2_prev = NULL;
  Interval* v;

  const int n = _sorted_intervals->length();
  for (int i = 0; i < n; i++) {
    v = _sorted_intervals->at(i);
    if (v == NULL) continue;

    if (is_list1(v)) {
      add_to_list(list1, &list1_prev, v);
    } else if (is_list2 == NULL || is_list2(v)) {
      add_to_list(list2, &list2_prev, v);
    }
  }

  if (list1_prev != NULL) list1_prev->set_next(Interval::end());
  if (list2_prev != NULL) list2_prev->set_next(Interval::end());

  assert(list1_prev == NULL || list1_prev->next() == Interval::end(), "linear list ends not with sentinel");
  assert(list2_prev == NULL || list2_prev->next() == Interval::end(), "linear list ends not with sentinel");
}


void LinearScan::sort_intervals_before_allocation() {
  TIME_LINEAR_SCAN(timer_sort_intervals_before);

  if (_needs_full_resort) {
    // There is no known reason why this should occur but just in case...
    assert(false, "should never occur");
    // Re-sort existing interval list because an Interval::from() has changed
    _sorted_intervals->sort(interval_cmp);
    _needs_full_resort = false;
  }

  IntervalList* unsorted_list = &_intervals;
  int unsorted_len = unsorted_list->length();
  int sorted_len = 0;
  int unsorted_idx;
  int sorted_idx = 0;
  int sorted_from_max = -1;

  // calc number of items for sorted list (sorted list must not contain NULL values)
  for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
    if (unsorted_list->at(unsorted_idx) != NULL) {
      sorted_len++;
    }
  }
  IntervalArray* sorted_list = new IntervalArray(sorted_len, sorted_len, NULL);

  // special sorting algorithm: the original interval-list is almost sorted,
  // only some intervals are swapped. So this is much faster than a complete QuickSort
  for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
    Interval* cur_interval = unsorted_list->at(unsorted_idx);

    if (cur_interval != NULL) {
      int cur_from = cur_interval->from();

      if (sorted_from_max <= cur_from) {
        sorted_list->at_put(sorted_idx++, cur_interval);
        sorted_from_max = cur_interval->from();
      } else {
        // the assumption that the intervals are already sorted failed,
        // so this interval must be inserted manually
        int j;
        for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
          sorted_list->at_put(j + 1, sorted_list->at(j));
        }
        sorted_list->at_put(j + 1, cur_interval);
        sorted_idx++;
      }
    }
  }
  _sorted_intervals = sorted_list;
  assert(is_sorted(_sorted_intervals), "intervals unsorted");
}
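
// Example of the almost-sorted fast path above, for start positions
// {2, 8, 4, 16}: 2 and 8 are appended directly, 4 fails the sorted_from_max
// check and is shifted into place by the inner loop (yielding {2, 4, 8}),
// then 16 is appended again. With only a few out-of-place intervals this
// behaves nearly linearly, instead of paying a full O(n log n) sort.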

void LinearScan::sort_intervals_after_allocation() {
  TIME_LINEAR_SCAN(timer_sort_intervals_after);

  if (_needs_full_resort) {
    // Re-sort existing interval list because an Interval::from() has changed
    _sorted_intervals->sort(interval_cmp);
    _needs_full_resort = false;
  }

  IntervalArray* old_list = _sorted_intervals;
  IntervalList* new_list = _new_intervals_from_allocation;
  int old_len = old_list->length();
  int new_len = new_list->length();

  if (new_len == 0) {
    // no intervals have been added during allocation, so sorted list is already up to date
    assert(is_sorted(_sorted_intervals), "intervals unsorted");
    return;
  }

  // conventional sort-algorithm for new intervals
  new_list->sort(interval_cmp);

  // merge old and new list (both already sorted) into one combined list
  int combined_list_len = old_len + new_len;
  IntervalArray* combined_list = new IntervalArray(combined_list_len, combined_list_len, NULL);
  int old_idx = 0;
  int new_idx = 0;

  while (old_idx + new_idx < old_len + new_len) {
    if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
      combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
      old_idx++;
    } else {
      combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
      new_idx++;
    }
  }

  _sorted_intervals = combined_list;
  assert(is_sorted(_sorted_intervals), "intervals unsorted");
}
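
// The loop above is a standard two-pointer merge of two sorted sequences:
// each iteration copies the smaller front element into the combined array,
// so the result is built in O(old_len + new_len) without re-sorting the
// (usually much larger) old list. The "<=" keeps the merge stable with
// respect to the old list when start positions tie.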


void LinearScan::allocate_registers() {
  TIME_LINEAR_SCAN(timer_allocate_registers);

  Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
  Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;

  // allocate cpu registers
  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
                         is_precolored_cpu_interval, is_virtual_cpu_interval);

  // allocate fpu registers
  create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
                         is_precolored_fpu_interval, is_virtual_fpu_interval);

  // the fpu interval lists cannot be created further down, next to the fpu
  // section below, because cpu_lsw.walk() changes interval positions.

  LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
  cpu_lsw.walk();
  cpu_lsw.finish_allocation();

  if (has_fpu_registers()) {
    LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
    fpu_lsw.walk();
    fpu_lsw.finish_allocation();
  }
}


// ********** Phase 6: resolve data flow
// (insert moves at edges between blocks if intervals have been split)

// wrapper for Interval::split_child_at_op_id that performs a bailout in product mode
// instead of returning NULL
Interval* LinearScan::split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode) {
  Interval* result = interval->split_child_at_op_id(op_id, mode);
  if (result != NULL) {
    return result;
  }

  assert(false, "must find an interval, but do a clean bailout in product mode");
  result = new Interval(LIR_OprDesc::vreg_base);
  result->assign_reg(0);
  result->set_type(T_INT);
  BAILOUT_("LinearScan: interval is NULL", result);
}


Interval* LinearScan::interval_at_block_begin(BlockBegin* block, int reg_num) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), block->first_lir_instruction_id(), LIR_OpVisitState::outputMode);
}

Interval* LinearScan::interval_at_block_end(BlockBegin* block, int reg_num) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), block->last_lir_instruction_id() + 1, LIR_OpVisitState::outputMode);
}

Interval* LinearScan::interval_at_op_id(int reg_num, int op_id) {
  assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
  assert(interval_at(reg_num) != NULL, "no interval found");

  return split_child_at_op_id(interval_at(reg_num), op_id, LIR_OpVisitState::inputMode);
}


void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
  DEBUG_ONLY(move_resolver.check_empty());

  const int num_regs = num_virtual_regs();
  const int size = live_set_size();
  const ResourceBitMap live_at_edge = to_block->live_in();

  // visit all registers where the live_at_edge bit is set
  for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
    assert(r < num_regs, "live information set for non-existing interval");
    assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");

    Interval* from_interval = interval_at_block_end(from_block, r);
    Interval* to_interval = interval_at_block_begin(to_block, r);

    if (from_interval != to_interval && (from_interval->assigned_reg() != to_interval->assigned_reg() || from_interval->assigned_regHi() != to_interval->assigned_regHi())) {
      // need to insert move instruction
      move_resolver.add_mapping(from_interval, to_interval);
    }
  }
}


void LinearScan::resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
  if (from_block->number_of_sux() <= 1) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));

    LIR_OpList* instructions = from_block->lir()->instructions_list();
    LIR_OpBranch* branch = instructions->last()->as_OpBranch();
    if (branch != NULL) {
      // insert moves before branch
      assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
      move_resolver.set_insert_position(from_block->lir(), instructions->length() - 2);
    } else {
      move_resolver.set_insert_position(from_block->lir(), instructions->length() - 1);
    }

  } else {
    TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
#ifdef ASSERT
    assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");

    // because the number of predecessor edges matches the number of
    // successor edges, blocks which are reached by switch statements
    // may have more than one predecessor but it will be guaranteed
    // that all predecessors will be the same.
    for (int i = 0; i < to_block->number_of_preds(); i++) {
      assert(from_block == to_block->pred_at(i), "all critical edges must be broken");
    }
#endif

    move_resolver.set_insert_position(to_block->lir(), 0);
  }
}


// insert necessary moves (spilling or reloading) at edges between blocks if interval has been split
void LinearScan::resolve_data_flow() {
  TIME_LINEAR_SCAN(timer_resolve_data_flow);

  int num_blocks = block_count();
  MoveResolver move_resolver(this);
  ResourceBitMap block_completed(num_blocks);
  ResourceBitMap already_resolved(num_blocks);

  int i;
  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);

    // check if block has only one predecessor and only one successor
    if (block->number_of_preds() == 1 && block->number_of_sux() == 1 && block->number_of_exception_handlers() == 0) {
      LIR_OpList* instructions = block->lir()->instructions_list();
      assert(instructions->at(0)->code() == lir_label, "block must start with label");
      assert(instructions->last()->code() == lir_branch, "block with successors must end with branch");
      assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block with successor must end with unconditional branch");

      // check if block is empty (only label and branch)
      if (instructions->length() == 2) {
        BlockBegin* pred = block->pred_at(0);
        BlockBegin* sux = block->sux_at(0);

        // prevent optimization of two consecutive blocks
        if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
          TRACE_LINEAR_SCAN(3, tty->print_cr("**** optimizing empty block B%d (pred: B%d, sux: B%d)", block->block_id(), pred->block_id(), sux->block_id()));
          block_completed.set_bit(block->linear_scan_number());

          // directly resolve between pred and sux (without looking at the empty block between)
          resolve_collect_mappings(pred, sux, move_resolver);
          if (move_resolver.has_mappings()) {
            move_resolver.set_insert_position(block->lir(), 0);
            move_resolver.resolve_and_append_moves();
          }
        }
      }
    }
  }


  for (i = 0; i < num_blocks; i++) {
    if (!block_completed.at(i)) {
      BlockBegin* from_block = block_at(i);
      already_resolved.set_from(block_completed);

      int num_sux = from_block->number_of_sux();
      for (int s = 0; s < num_sux; s++) {
        BlockBegin* to_block = from_block->sux_at(s);

        // check for duplicate edges between the same blocks (can happen with switch blocks)
        if (!already_resolved.at(to_block->linear_scan_number())) {
          TRACE_LINEAR_SCAN(3, tty->print_cr("**** processing edge between B%d and B%d", from_block->block_id(), to_block->block_id()));
          already_resolved.set_bit(to_block->linear_scan_number());

          // collect all intervals that have been split between from_block and to_block
          resolve_collect_mappings(from_block, to_block, move_resolver);
          if (move_resolver.has_mappings()) {
            resolve_find_insert_pos(from_block, to_block, move_resolver);
            move_resolver.resolve_and_append_moves();
          }
        }
      }
    }
  }
}
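
// Example of the resolution performed above, assuming an interval for v3 was
// split so that it sits in a register at the end of block B1 but in its
// spill slot at the start of successor B3: the edge B1 -> B3 then needs a
// spill store, inserted either at the end of B1 (when B1 has at most one
// successor) or at the start of B3 (otherwise), so every path observes a
// consistent location. If nothing was split across an edge, no moves are
// emitted for it.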


void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver) {
  if (interval_at(reg_num) == NULL) {
    // if a phi function is never used, no interval is created -> ignore this
    return;
  }

  Interval* interval = interval_at_block_begin(block, reg_num);
  int reg = interval->assigned_reg();
  int regHi = interval->assigned_regHi();

  if ((reg < nof_regs && interval->always_in_memory()) ||
      (use_fpu_stack_allocation() && reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg)) {
    // the interval is split to get a short range that is located on the stack
    // in the following two cases:
    // * the interval started in memory (e.g. method parameter), but is currently in a register
    //   this is an optimization for exception handling that reduces the number of moves that
    //   are necessary for resolving the states when an exception uses this exception handler
    // * the interval would be on the fpu stack at the beginning of the exception handler
    //   this is not allowed because of the complicated fpu stack handling on Intel

    // range that will be spilled to memory
    int from_op_id = block->first_lir_instruction_id();
    int to_op_id = from_op_id + 1;  // short live range of length 1
    assert(interval->from() <= from_op_id && interval->to() >= to_op_id,
           "no split allowed between exception entry and first instruction");

    if (interval->from() != from_op_id) {
      // the part before from_op_id is unchanged
      interval = interval->split(from_op_id);
      interval->assign_reg(reg, regHi);
      append_interval(interval);
    } else {
      _needs_full_resort = true;
    }
    assert(interval->from() == from_op_id, "must be true now");

    Interval* spilled_part = interval;
    if (interval->to() != to_op_id) {
      // the part after to_op_id is unchanged
      spilled_part = interval->split_from_start(to_op_id);
      append_interval(spilled_part);
      move_resolver.add_mapping(spilled_part, interval);
    }
    assign_spill_slot(spilled_part);

    assert(spilled_part->from() == from_op_id && spilled_part->to() == to_op_id, "just checking");
  }
}
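
// Sketch of the split performed above, with illustrative op_ids: for an
// interval covering [4, 40] whose handler entry has op_id 20, the code
// splits at 20 and 21, so [4, 20) and [21, 40) keep their assigned
// locations while the one-operation range [20, 21) is pinned to the
// canonical spill slot. The mapping from the spilled part to the tail then
// reloads the value if the handler body expects it in a register.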

void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver) {
  assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
  DEBUG_ONLY(move_resolver.check_empty());

  // visit all registers where the live_in bit is set
  int size = live_set_size();
  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
    resolve_exception_entry(block, r, move_resolver);
  }

  // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  for_each_phi_fun(block, phi,
    resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
  );

  if (move_resolver.has_mappings()) {
    // insert moves after first instruction
    move_resolver.set_insert_position(block->lir(), 0);
    move_resolver.resolve_and_append_moves();
  }
}


void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver) {
  if (interval_at(reg_num) == NULL) {
    // if a phi function is never used, no interval is created -> ignore this
    return;
  }

  // the computation of to_interval is the same as in resolve_collect_mappings,
  // but from_interval is more complicated because of phi functions
  BlockBegin* to_block = handler->entry_block();
  Interval* to_interval = interval_at_block_begin(to_block, reg_num);

  if (phi != NULL) {
    // phi function of the exception entry block
    // no moves are created for this phi function in the LIR_Generator, so the
    // interval at the throwing instruction must be searched using the operands
    // of the phi function
    Value from_value = phi->operand_at(handler->phi_operand());

    // with phi functions it can happen that the same from_value is used in
    // multiple mappings, so notify move-resolver that this is allowed
    move_resolver.set_multiple_reads_allowed();

    Constant* con = from_value->as_Constant();
    if (con != NULL && !con->is_pinned()) {
      // unpinned constants may have no register, so add mapping from constant to interval
      move_resolver.add_mapping(LIR_OprFact::value_type(con->type()), to_interval);
    } else {
      // search split child at the throwing op_id
      Interval* from_interval = interval_at_op_id(from_value->operand()->vreg_number(), throwing_op_id);
      move_resolver.add_mapping(from_interval, to_interval);
    }

  } else {
    // no phi function, so use reg_num also for from_interval
    // search split child at the throwing op_id
    Interval* from_interval = interval_at_op_id(reg_num, throwing_op_id);
    if (from_interval != to_interval) {
      // optimization to reduce number of moves: when to_interval is on stack and
      // the stack slot is known to be always correct, then no move is necessary
      if (!from_interval->always_in_memory() || from_interval->canonical_spill_slot() != to_interval->assigned_reg()) {
        move_resolver.add_mapping(from_interval, to_interval);
      }
    }
  }
}

void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("resolving exception handler B%d: throwing_op_id=%d", handler->entry_block()->block_id(), throwing_op_id));

  DEBUG_ONLY(move_resolver.check_empty());
  assert(handler->lir_op_id() == -1, "already processed this xhandler");
  DEBUG_ONLY(handler->set_lir_op_id(throwing_op_id));
  assert(handler->entry_code() == NULL, "code already present");

  // visit all registers where the live_in bit is set
  BlockBegin* block = handler->entry_block();
  int size = live_set_size();
  for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
    resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
  }

  // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
  for_each_phi_fun(block, phi,
    resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver)
  );

  if (move_resolver.has_mappings()) {
    LIR_List* entry_code = new LIR_List(compilation());
    move_resolver.set_insert_position(entry_code, 0);
    move_resolver.resolve_and_append_moves();

    entry_code->jump(handler->entry_block());
    handler->set_entry_code(entry_code);
  }
}


void LinearScan::resolve_exception_handlers() {
  MoveResolver move_resolver(this);
  LIR_OpVisitState visitor;
  int num_blocks = block_count();

  int i;
  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      resolve_exception_entry(block, move_resolver);
    }
  }

  for (i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    LIR_List* ops = block->lir();
    int num_ops = ops->length();

    // iterate all instructions of the block. skip the first because it is always a label
    assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
    for (int j = 1; j < num_ops; j++) {
      LIR_Op* op = ops->at(j);
      int op_id = op->id();

      if (op_id != -1 && has_info(op_id)) {
        // visit operation to collect all operands
        visitor.visit(op);
        assert(visitor.info_count() > 0, "should not visit otherwise");

        XHandlers* xhandlers = visitor.all_xhandler();
        int n = xhandlers->length();
        for (int k = 0; k < n; k++) {
          resolve_exception_edge(xhandlers->handler_at(k), op_id, move_resolver);
        }

#ifdef ASSERT
      } else {
        visitor.visit(op);
        assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
#endif
      }
    }
  }
}


// ********** Phase 7: assign register numbers back to LIR
// (includes computation of debug information and oop maps)

VMReg LinearScan::vm_reg_for_interval(Interval* interval) {
  VMReg reg = interval->cached_vm_reg();
  if (!reg->is_valid()) {
    reg = vm_reg_for_operand(operand_for_interval(interval));
    interval->set_cached_vm_reg(reg);
  }
  assert(reg == vm_reg_for_operand(operand_for_interval(interval)), "wrong cached value");
  return reg;
}

VMReg LinearScan::vm_reg_for_operand(LIR_Opr opr) {
  assert(opr->is_oop(), "currently only implemented for oop operands");
  return frame_map()->regname(opr);
}


LIR_Opr LinearScan::operand_for_interval(Interval* interval) {
  LIR_Opr opr = interval->cached_opr();
  if (opr->is_illegal()) {
    opr = calc_operand_for_interval(interval);
    interval->set_cached_opr(opr);
  }

  assert(opr == calc_operand_for_interval(interval), "wrong cached value");
  return opr;
}
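
// Both accessors above use the same compute-once caching pattern: the first
// query materializes the value and stores it in the interval, and the
// debug-only assert recomputes it on every later query to catch stale
// caches. This is safe only because an interval's assigned location no
// longer changes once allocation has finished.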
|
2070 |
||
2071 |
LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
  int assigned_reg = interval->assigned_reg();
  BasicType type = interval->type();

  if (assigned_reg >= nof_regs) {
    // stack slot
    assert(interval->assigned_regHi() == any_reg, "must not have hi register");
    return LIR_OprFact::stack(assigned_reg - nof_regs, type);

  } else {
    // register
    switch (type) {
      case T_OBJECT: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu_oop(assigned_reg);
      }

      case T_ADDRESS: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu_address(assigned_reg);
      }

      case T_METADATA: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu_metadata(assigned_reg);
      }

#ifdef __SOFTFP__
      case T_FLOAT:  // fall through
#endif // __SOFTFP__
      case T_INT: {
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_cpu(assigned_reg);
      }

#ifdef __SOFTFP__
      case T_DOUBLE:  // fall through
#endif // __SOFTFP__
      case T_LONG: {
        int assigned_regHi = interval->assigned_regHi();
        assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
        assert(num_physical_regs(T_LONG) == 1 ||
               (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");

        assert(assigned_reg != assigned_regHi, "invalid allocation");
        assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
               "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
        assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
        if (requires_adjacent_regs(T_LONG)) {
          assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
        }

#ifdef _LP64
        return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
#else
#if defined(SPARC) || defined(PPC32)
        return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
#else
        return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
#endif // SPARC || PPC32
#endif // _LP64
      }

#ifndef __SOFTFP__
      case T_FLOAT: {
#ifdef X86
        if (UseSSE >= 1) {
          int last_xmm_reg = pd_last_xmm_reg;
#ifdef _LP64
          if (UseAVX < 3) {
            last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
          }
#endif
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register");
          return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
        }
#endif

        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register");
        return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
      }

      case T_DOUBLE: {
#ifdef X86
        if (UseSSE >= 2) {
          int last_xmm_reg = pd_last_xmm_reg;
#ifdef _LP64
          if (UseAVX < 3) {
            last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
          }
#endif
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
          return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
        }
#endif

#ifdef SPARC
        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
        assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
        LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
#elif defined(ARM32)
        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
        assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
        LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
#else
        assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
        assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
        LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
#endif
        return result;
      }
#endif // __SOFTFP__

      default: {
        ShouldNotReachHere();
        return LIR_OprFact::illegalOpr;
      }
    }
  }
}

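// Note: the canonical spill slot is the one stack location shared by all split
// children of an interval, so a value that gets spilled more than once always
// lands in the same slot (see also the spill optimization in compute_oop_map()).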
LIR_Opr LinearScan::canonical_spill_opr(Interval* interval) {
  assert(interval->canonical_spill_slot() >= nof_regs, "canonical spill slot not set");
  return LIR_OprFact::stack(interval->canonical_spill_slot() - nof_regs, interval->type());
}

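// color_lir_opr() rewrites a virtual operand to the physical location of its
// interval at op_id. Because intervals can be split during allocation, the
// matching split child must be looked up first; the mode decides whether an
// operand that is both used and defined at op_id is resolved at the input or
// at the output position of the instruction.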
LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprMode mode) {
  assert(opr->is_virtual(), "should not call this otherwise");

  Interval* interval = interval_at(opr->vreg_number());
  assert(interval != NULL, "interval must exist");

  if (op_id != -1) {
#ifdef ASSERT
    BlockBegin* block = block_of_op_with_id(op_id);
    if (block->number_of_sux() <= 1 && op_id == block->last_lir_instruction_id()) {
      // check if spill moves could have been appended at the end of this block, but
      // before the branch instruction, so the split child information for this branch
      // would be incorrect.
      LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
      if (branch != NULL) {
        if (block->live_out().at(opr->vreg_number())) {
          assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
          assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
        }
      }
    }
#endif

    // operands are not changed when an interval is split during allocation,
    // so search the right interval here
    interval = split_child_at_op_id(interval, op_id, mode);
  }

  LIR_Opr res = operand_for_interval(interval);

#ifdef X86
  // new semantic for is_last_use: not only set on the definite end of an interval,
  // but also before a hole. This may still miss some cases (e.g. for dead values),
  // but it is not necessary that the last-use information is completely correct:
  // the information is only needed for fpu stack allocation.
  if (res->is_fpu_register()) {
    if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
      assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
      res = res->make_last_use();
    }
  }
#endif

  assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");

  return res;
}

#ifdef ASSERT
// some methods used to check correctness of debug information

void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
  if (values == NULL) {
    return;
  }

  for (int i = 0; i < values->length(); i++) {
    ScopeValue* value = values->at(i);

    if (value->is_location()) {
      Location location = ((LocationValue*)value)->location();
      assert(location.where() == Location::on_stack, "value is in register");
    }
  }
}

void assert_no_register_values(GrowableArray<MonitorValue*>* values) {
  if (values == NULL) {
    return;
  }

  for (int i = 0; i < values->length(); i++) {
    MonitorValue* value = values->at(i);

    if (value->owner()->is_location()) {
      Location location = ((LocationValue*)value->owner())->location();
      assert(location.where() == Location::on_stack, "owner is in register");
    }
    assert(value->basic_lock().where() == Location::on_stack, "basic_lock is in register");
  }
}

void assert_equal(Location l1, Location l2) {
  assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
}

void assert_equal(ScopeValue* v1, ScopeValue* v2) {
  if (v1->is_location()) {
    assert(v2->is_location(), "");
    assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
  } else if (v1->is_constant_int()) {
    assert(v2->is_constant_int(), "");
    assert(((ConstantIntValue*)v1)->value() == ((ConstantIntValue*)v2)->value(), "");
  } else if (v1->is_constant_double()) {
    assert(v2->is_constant_double(), "");
    assert(((ConstantDoubleValue*)v1)->value() == ((ConstantDoubleValue*)v2)->value(), "");
  } else if (v1->is_constant_long()) {
    assert(v2->is_constant_long(), "");
    assert(((ConstantLongValue*)v1)->value() == ((ConstantLongValue*)v2)->value(), "");
  } else if (v1->is_constant_oop()) {
    assert(v2->is_constant_oop(), "");
    assert(((ConstantOopWriteValue*)v1)->value() == ((ConstantOopWriteValue*)v2)->value(), "");
  } else {
    ShouldNotReachHere();
  }
}

void assert_equal(MonitorValue* m1, MonitorValue* m2) {
  assert_equal(m1->owner(), m2->owner());
  assert_equal(m1->basic_lock(), m2->basic_lock());
}

void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
  assert(d1->scope() == d2->scope(), "not equal");
  assert(d1->bci() == d2->bci(), "not equal");

  if (d1->locals() != NULL) {
    assert(d1->locals() != NULL && d2->locals() != NULL, "not equal");
    assert(d1->locals()->length() == d2->locals()->length(), "not equal");
    for (int i = 0; i < d1->locals()->length(); i++) {
      assert_equal(d1->locals()->at(i), d2->locals()->at(i));
    }
  } else {
    assert(d1->locals() == NULL && d2->locals() == NULL, "not equal");
  }

  if (d1->expressions() != NULL) {
    assert(d1->expressions() != NULL && d2->expressions() != NULL, "not equal");
    assert(d1->expressions()->length() == d2->expressions()->length(), "not equal");
    for (int i = 0; i < d1->expressions()->length(); i++) {
      assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
    }
  } else {
    assert(d1->expressions() == NULL && d2->expressions() == NULL, "not equal");
  }

  if (d1->monitors() != NULL) {
    assert(d1->monitors() != NULL && d2->monitors() != NULL, "not equal");
    assert(d1->monitors()->length() == d2->monitors()->length(), "not equal");
    for (int i = 0; i < d1->monitors()->length(); i++) {
      assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
    }
  } else {
    assert(d1->monitors() == NULL && d2->monitors() == NULL, "not equal");
  }

  if (d1->caller() != NULL) {
    assert(d1->caller() != NULL && d2->caller() != NULL, "not equal");
    assert_equal(d1->caller(), d2->caller());
  } else {
    assert(d1->caller() == NULL && d2->caller() == NULL, "not equal");
  }
}

void check_stack_depth(CodeEmitInfo* info, int stack_end) {
  if (info->stack()->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
    Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
    switch (code) {
      case Bytecodes::_ifnull    : // fall through
      case Bytecodes::_ifnonnull : // fall through
      case Bytecodes::_ifeq      : // fall through
      case Bytecodes::_ifne      : // fall through
      case Bytecodes::_iflt      : // fall through
      case Bytecodes::_ifge      : // fall through
      case Bytecodes::_ifgt      : // fall through
      case Bytecodes::_ifle      : // fall through
      case Bytecodes::_if_icmpeq : // fall through
      case Bytecodes::_if_icmpne : // fall through
      case Bytecodes::_if_icmplt : // fall through
      case Bytecodes::_if_icmpge : // fall through
      case Bytecodes::_if_icmpgt : // fall through
      case Bytecodes::_if_icmple : // fall through
      case Bytecodes::_if_acmpeq : // fall through
      case Bytecodes::_if_acmpne :
        assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
        break;
      default:
        break;
    }
  }
}

#endif // ASSERT

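// Note: the dummy non-oop interval created below covers the range
// [max_jint - 2, max_jint - 1], which lies beyond every real operation id, so
// the IntervalWalker can always advance up to the last instruction even when
// no ordinary non-oop interval extends that far.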
IntervalWalker* LinearScan::init_compute_oop_maps() {
  // setup lists of potential oops for walking
  Interval* oop_intervals;
  Interval* non_oop_intervals;

  create_unhandled_lists(&oop_intervals, &non_oop_intervals, is_oop_interval, NULL);

  // intervals that have no oops inside need not be processed.
  // to ensure walking until the last instruction id, add a dummy interval
  // with a high operation id
  non_oop_intervals = new Interval(any_reg);
  non_oop_intervals->add_range(max_jint - 2, max_jint - 1);

  return new IntervalWalker(this, oop_intervals, non_oop_intervals);
}

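// Builds the OopMap for one instruction: every fixed-kind interval that holds
// an oop and is live across op_id contributes its current register or stack
// location, and the monitor slots of the lock stack are always added.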
OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));

  // walk before the current operation -> intervals that start at
  // the operation (= output operands of the operation) are not
  // included in the oop map
  iw->walk_before(op->id());

  int frame_size = frame_map()->framesize();
  int arg_count = frame_map()->oop_map_arg_count();
  OopMap* map = new OopMap(frame_size, arg_count);

  // Iterate through active intervals
  for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
    int assigned_reg = interval->assigned_reg();

    assert(interval->current_from() <= op->id() && op->id() <= interval->current_to(), "interval should not be active otherwise");
    assert(interval->assigned_regHi() == any_reg, "oop must be single word");
    assert(interval->reg_num() >= LIR_OprDesc::vreg_base, "fixed interval found");

    // Check if this range covers the instruction. Intervals that
    // start or end at the current operation are not included in the
    // oop map, except in the case of patching moves. For patching
    // moves, any intervals which end at this instruction are included
    // in the oop map since we may safepoint while doing the patch
    // before we've consumed the inputs.
    if (op->is_patching() || op->id() < interval->current_to()) {

      // caller-save registers must not be included into oop-maps at calls
      assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");

      VMReg name = vm_reg_for_interval(interval);
      set_oop(map, name);

      // Spill optimization: when the stack value is guaranteed to be always correct,
      // then it must be added to the oop map even if the interval is currently in a register
      if (interval->always_in_memory() &&
          op->id() > interval->spill_definition_pos() &&
          interval->assigned_reg() != interval->canonical_spill_slot()) {
        assert(interval->spill_definition_pos() > 0, "position not set correctly");
        assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
        assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");

        set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
      }
    }
  }

  // add oops from lock stack
  assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
  int locks_count = info->stack()->total_locks_size();
  for (int i = 0; i < locks_count; i++) {
    set_oop(map, frame_map()->monitor_object_regname(i));
  }

  return map;
}

2469 |
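// Most operations carry a single CodeEmitInfo, and when there are several they
// usually describe the same state, so the map computed for the first info is
// shared and only recomputed when the lock counts differ (lock/unlock ops).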
void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op) {
  assert(visitor.info_count() > 0, "no oop map needed");

  // compute oop_map only for first CodeEmitInfo
  // because it is (in most cases) equal for all other infos of the same operation
  CodeEmitInfo* first_info = visitor.info_at(0);
  OopMap* first_oop_map = compute_oop_map(iw, op, first_info, visitor.has_call());

  for (int i = 0; i < visitor.info_count(); i++) {
    CodeEmitInfo* info = visitor.info_at(i);
    OopMap* oop_map = first_oop_map;

    // compute worst case interpreter size in case of a deoptimization
    _compilation->update_interpreter_frame_size(info->interpreter_frame_size());

    if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
      // this info has a different number of locks than the precomputed oop map
      // (possible for lock and unlock instructions) -> compute oop map with
      // correct lock information
      oop_map = compute_oop_map(iw, op, info, visitor.has_call());
    }

    if (info->_oop_map == NULL) {
      info->_oop_map = oop_map;
    } else {
      // a CodeEmitInfo can not be shared between different LIR-instructions
      // because interval splitting can occur anywhere between two instructions
      // and so the oop maps must be different
      // -> check if the already set oop_map is exactly the one calculated for this operation
      assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
    }
  }
}

// frequently used constants
// Allocate them with new so they are never destroyed (otherwise, a
// forced exit could destroy these objects while they are still in
// use).
ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue((jint)0);
ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
LocationValue*         _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());

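// The scope-value cache holds two entries (non-oop and oop) per cpu register
// and per stack slot; append_scope_value_for_operand() indexes it as
// (stack_idx + nof_cpu_regs) * 2 + is_oop for stack slots and as
// cpu_regnr() * 2 + is_oop for registers. For example, an oop in stack slot 5
// uses entry (5 + nof_cpu_regs) * 2 + 1.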
void LinearScan::init_compute_debug_info() {
  // cache for frequently used scope values
  // (cpu registers and stack slots)
  int cache_size = (LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2;
  _scope_value_cache = ScopeValueArray(cache_size, cache_size, NULL);
}

MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
  Location loc;
  if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
    bailout("too large frame");
  }
  ScopeValue* object_scope_value = new LocationValue(loc);

  if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
    bailout("too large frame");
  }
  return new MonitorValue(object_scope_value, loc);
}

LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
  Location loc;
  if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
    bailout("too large frame");
  }
  return new LocationValue(loc);
}

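// Constants are recorded by bit pattern: as_jint_bits() returns the raw bits,
// so the int constant 1 hits the cached _int_1_scope_value, while the float
// constant 1.0f (bit pattern 0x3f800000) falls through to a freshly allocated
// ConstantIntValue.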
int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  assert(opr->is_constant(), "should not be called otherwise");

  LIR_Const* c = opr->as_constant_ptr();
  BasicType t = c->type();
  switch (t) {
    case T_OBJECT: {
      jobject value = c->as_jobject();
      if (value == NULL) {
        scope_values->append(_oop_null_scope_value);
      } else {
        scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
      }
      return 1;
    }

    case T_INT: // fall through
    case T_FLOAT: {
      int value = c->as_jint_bits();
      switch (value) {
        case -1: scope_values->append(_int_m1_scope_value); break;
        case 0:  scope_values->append(_int_0_scope_value); break;
        case 1:  scope_values->append(_int_1_scope_value); break;
        case 2:  scope_values->append(_int_2_scope_value); break;
        default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
      }
      return 1;
    }

    case T_LONG: // fall through
    case T_DOUBLE: {
#ifdef _LP64
      scope_values->append(_int_0_scope_value);
      scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
#else
      if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
        scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
        scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
      } else {
        scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
        scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
      }
#endif
      return 2;
    }

    case T_ADDRESS: {
#ifdef _LP64
      scope_values->append(new ConstantLongValue(c->as_jint()));
#else
      scope_values->append(new ConstantIntValue(c->as_jint()));
#endif
      return 1;
    }

    default:
      ShouldNotReachHere();
      return -1;
  }
}

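// Appends the ScopeValue(s) describing where opr currently lives and returns
// the number of words appended: one for single-word operands, two for longs
// and doubles (appended in the order the interpreter expects, see below).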
int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  if (opr->is_single_stack()) {
    int stack_idx = opr->single_stack_ix();
    bool is_oop = opr->is_oop_register();
    int cache_idx = (stack_idx + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0);

    ScopeValue* sv = _scope_value_cache.at(cache_idx);
    if (sv == NULL) {
      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
      sv = location_for_name(stack_idx, loc_type);
      _scope_value_cache.at_put(cache_idx, sv);
    }

    // check if cached value is correct
    DEBUG_ONLY(assert_equal(sv, location_for_name(stack_idx, is_oop ? Location::oop : Location::normal)));

    scope_values->append(sv);
    return 1;

  } else if (opr->is_single_cpu()) {
    bool is_oop = opr->is_oop_register();
    int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
    Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);

    ScopeValue* sv = _scope_value_cache.at(cache_idx);
    if (sv == NULL) {
      Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
      VMReg rname = frame_map()->regname(opr);
      sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
      _scope_value_cache.at_put(cache_idx, sv);
    }

    // check if cached value is correct
    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));

    scope_values->append(sv);
    return 1;

#ifdef X86
  } else if (opr->is_single_xmm()) {
    VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
    LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));

    scope_values->append(sv);
    return 1;
#endif

  } else if (opr->is_single_fpu()) {
#ifdef X86
    // the exact location of fpu stack values is only known
    // during fpu stack allocation, so the stack allocator object
    // must be present
    assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
    assert(_fpu_stack_allocator != NULL, "must be present");
    opr = _fpu_stack_allocator->to_fpu_stack(opr);
#endif

    Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
    VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
#ifndef __SOFTFP__
#ifndef VM_LITTLE_ENDIAN
    // On S390 a (single precision) float value occupies only the high
    // word of the full double register. So when the double register is
    // stored to memory (e.g. by the RegisterSaver), then the float value
    // is found at offset 0. I.e. the code below is not needed on S390.
#ifndef S390
    if (! float_saved_as_double) {
      // On big endian system, we may have an issue if float registers use only
      // the low half of the (same) double registers.
      // Both the float and the double could have the same regnr but would correspond
      // to two different addresses once saved.

      // get next safely (no assertion checks)
      VMReg next = VMRegImpl::as_VMReg(1+rname->value());
      if (next->is_reg() &&
          (next->as_FloatRegister() == rname->as_FloatRegister())) {
        // the back-end does use the same numbering for the double and the float
        rname = next; // VMReg for the low bits, e.g. the real VMReg for the float
      }
    }
#endif // !S390
#endif
#endif
    LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));

    scope_values->append(sv);
    return 1;

  } else {
    // double-size operands

    ScopeValue* first;
    ScopeValue* second;

    if (opr->is_double_stack()) {
#ifdef _LP64
      Location loc1;
      Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
      if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
        bailout("too large frame");
      }
      // Does this reverse on x86 vs. sparc?
      first = new LocationValue(loc1);
      second = _int_0_scope_value;
#else
      Location loc1, loc2;
      if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
        bailout("too large frame");
      }
      first = new LocationValue(loc1);
      second = new LocationValue(loc2);
#endif // _LP64

    } else if (opr->is_double_cpu()) {
#ifdef _LP64
      VMReg rname_first = opr->as_register_lo()->as_VMReg();
      first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
      second = _int_0_scope_value;
#else
      VMReg rname_first = opr->as_register_lo()->as_VMReg();
      VMReg rname_second = opr->as_register_hi()->as_VMReg();

      if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
        // lo/hi are swapped relative to first and second, so swap them
        VMReg tmp = rname_first;
        rname_first = rname_second;
        rname_second = tmp;
      }

      first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
      second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
#endif //_LP64


#ifdef X86
    } else if (opr->is_double_xmm()) {
      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
      VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
# ifdef _LP64
      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
      second = _int_0_scope_value;
# else
      first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
      // %%% This is probably a waste but we'll keep things as they were for now
      if (true) {
        VMReg rname_second = rname_first->next();
        second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
      }
# endif
#endif

    } else if (opr->is_double_fpu()) {
      // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
      // the double as float registers in the native ordering. On X86,
      // fpu_regnrLo is a FPU stack slot whose VMReg represents
      // the low-order word of the double and fpu_regnrLo + 1 is the
      // name for the other half.  *first and *second must represent the
      // least and most significant words, respectively.

#ifdef X86
      // the exact location of fpu stack values is only known
      // during fpu stack allocation, so the stack allocator object
      // must be present
      assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
      assert(_fpu_stack_allocator != NULL, "must be present");
      opr = _fpu_stack_allocator->to_fpu_stack(opr);

      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
#endif
#ifdef SPARC
      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
#endif
#ifdef ARM32
      assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
#endif
#ifdef PPC32
      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
#endif

#ifdef VM_LITTLE_ENDIAN
      VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
#else
      VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
#endif

#ifdef _LP64
      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
      second = _int_0_scope_value;
#else
      first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
      // %%% This is probably a waste but we'll keep things as they were for now
      if (true) {
        VMReg rname_second = rname_first->next();
        second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
      }
#endif

    } else {
      ShouldNotReachHere();
      first = NULL;
      second = NULL;
    }

    assert(first != NULL && second != NULL, "must be set");
    // The convention the interpreter uses is that the second local
    // holds the first raw word of the native double representation.
    // This is actually reasonable, since locals and stack arrays
    // grow downwards in all implementations.
    // (If, on some machine, the interpreter's Java locals or stack
    // were to grow upwards, the embedded doubles would be word-swapped.)
    scope_values->append(second);
    scope_values->append(first);
    return 2;
  }
}

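// Dispatches on the kind of IR value: NULL values get the shared _illegal_value
// placeholder, constants are encoded directly, and virtual operands are first
// colored to their location at op_id before being described.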
int LinearScan::append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values) {
  if (value != NULL) {
    LIR_Opr opr = value->operand();
    Constant* con = value->as_Constant();

    assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands (or illegal if constant is optimized away)");
    assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");

    if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
      // Unpinned constants may have a virtual operand for a part of the lifetime
      // or may be illegal when it was optimized away,
      // so always use a constant operand
      opr = LIR_OprFact::value_type(con->type());
    }
    assert(opr->is_virtual() || opr->is_constant(), "other cases not allowed here");

    if (opr->is_virtual()) {
      LIR_OpVisitState::OprMode mode = LIR_OpVisitState::inputMode;

      BlockBegin* block = block_of_op_with_id(op_id);
      if (block->number_of_sux() == 1 && op_id == block->last_lir_instruction_id()) {
        // generating debug information for the last instruction of a block.
        // if this instruction is a branch, spill moves are inserted before this branch
        // and so the wrong operand would be returned (spill moves at block boundaries are not
        // considered in the live ranges of intervals)
        // Solution: use the first op_id of the branch target block instead.
        if (block->lir()->instructions_list()->last()->as_OpBranch() != NULL) {
          if (block->live_out().at(opr->vreg_number())) {
            op_id = block->sux_at(0)->first_lir_instruction_id();
            mode = LIR_OpVisitState::outputMode;
          }
        }
      }

      // Get current location of operand
      // The operand must be live because debug information is considered when building the intervals
      // if the interval is not live, color_lir_opr will cause an assertion failure
      opr = color_lir_opr(opr, op_id, mode);
      assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "can not have caller-save register operands at calls");

      // Append to ScopeValue array
      return append_scope_value_for_operand(opr, scope_values);

    } else {
      assert(value->as_Constant() != NULL, "all other instructions have only virtual operands");
      assert(opr->is_constant(), "operand must be constant");

      return append_scope_value_for_constant(opr, scope_values);
    }
  } else {
    // append a dummy value because real value not needed
    scope_values->append(_illegal_value);
    return 1;
  }
}

6745 | 2879 |
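// Builds the IRScopeDebugInfo chain for an inlined scope. The recursion on the
// caller state runs first, so the outermost scope is computed before its
// callees and each IRScopeDebugInfo links back to its caller's debug info.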
IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state) {
  IRScopeDebugInfo* caller_debug_info = NULL;

  ValueStack* caller_state = cur_state->caller_state();
  if (caller_state != NULL) {
    // process recursively to compute outermost scope first
    caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state);
  }

  // initialize these to null.
  // If we don't need deopt info or there are no locals, expressions or monitors,
  // then these get recorded as no information and we avoid the allocation of
  // 0-length arrays.
  GrowableArray<ScopeValue*>* locals = NULL;
  GrowableArray<ScopeValue*>* expressions = NULL;
  GrowableArray<MonitorValue*>* monitors = NULL;

  // describe local variable values
  int nof_locals = cur_state->locals_size();
  if (nof_locals > 0) {
    locals = new GrowableArray<ScopeValue*>(nof_locals);

    int pos = 0;
    while (pos < nof_locals) {
      assert(pos < cur_state->locals_size(), "why not?");

      Value local = cur_state->local_at(pos);
      pos += append_scope_value(op_id, local, locals);

      assert(locals->length() == pos, "must match");
    }
    assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
    assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
  } else if (cur_scope->method()->max_locals() > 0) {
    assert(cur_state->kind() == ValueStack::EmptyExceptionState, "should be");
    nof_locals = cur_scope->method()->max_locals();
    locals = new GrowableArray<ScopeValue*>(nof_locals);
    for(int i = 0; i < nof_locals; i++) {
      locals->append(_illegal_value);
    }
  }

  // describe expression stack
  int nof_stack = cur_state->stack_size();
  if (nof_stack > 0) {
    expressions = new GrowableArray<ScopeValue*>(nof_stack);

    int pos = 0;
    while (pos < nof_stack) {
      Value expression = cur_state->stack_at_inc(pos);
      append_scope_value(op_id, expression, expressions);

      assert(expressions->length() == pos, "must match");
    }
    assert(expressions->length() == cur_state->stack_size(), "wrong number of stack entries");
  }

  // describe monitors
  int nof_locks = cur_state->locks_size();
  if (nof_locks > 0) {
    int lock_offset = cur_state->caller_state() != NULL ? cur_state->caller_state()->total_locks_size() : 0;
    monitors = new GrowableArray<MonitorValue*>(nof_locks);
    for (int i = 0; i < nof_locks; i++) {
      monitors->append(location_for_monitor_index(lock_offset + i));
    }
  }

  return new IRScopeDebugInfo(cur_scope, cur_state->bci(), locals, expressions, monitors, caller_debug_info);
}

void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));

  IRScope* innermost_scope = info->scope();
  ValueStack* innermost_state = info->stack();

  assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");

  DEBUG_ONLY(check_stack_depth(info, innermost_state->stack_size()));

  if (info->_scope_debug_info == NULL) {
    // compute debug information
    info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state);
  } else {
    // debug information already set. Check that it is correct from the current point of view
    DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state)));
  }
}

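// Rewrites every virtual operand of every instruction in the list to its
// physical location, computes oop maps and debug information where needed,
// and drops moves that became no-ops once both sides ended up in the same
// location.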
void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
  LIR_OpVisitState visitor;
  int num_inst = instructions->length();
  bool has_dead = false;

  for (int j = 0; j < num_inst; j++) {
    LIR_Op* op = instructions->at(j);
    if (op == NULL) {  // this can happen when spill-moves are removed in eliminate_spill_moves
      has_dead = true;
      continue;
    }
    int op_id = op->id();

    // visit instruction to get list of operands
    visitor.visit(op);

    // iterate all modes of the visitor and process all virtual operands
    for_each_visitor_mode(mode) {
      int n = visitor.opr_count(mode);
      for (int k = 0; k < n; k++) {
        LIR_Opr opr = visitor.opr_at(mode, k);
        if (opr->is_virtual_register()) {
          visitor.set_opr_at(mode, k, color_lir_opr(opr, op_id, mode));
        }
      }
    }

    if (visitor.info_count() > 0) {
      // exception handling
      if (compilation()->has_exception_handlers()) {
        XHandlers* xhandlers = visitor.all_xhandler();
        int n = xhandlers->length();
        for (int k = 0; k < n; k++) {
          XHandler* handler = xhandlers->handler_at(k);
          if (handler->entry_code() != NULL) {
            assign_reg_num(handler->entry_code()->instructions_list(), NULL);
          }
        }
      } else {
        assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
      }

      // compute oop map
      assert(iw != NULL, "needed for compute_oop_map");
      compute_oop_map(iw, visitor, op);

      // compute debug information
      if (!use_fpu_stack_allocation()) {
        // compute debug information if fpu stack allocation is not needed.
        // when fpu stack allocation is needed, the debug information can not
        // be computed here because the exact location of fpu operands is not known
        // -> debug information is created inside the fpu stack allocator
        int n = visitor.info_count();
        for (int k = 0; k < n; k++) {
          compute_debug_info(visitor.info_at(k), op_id);
        }
      }
    }

#ifdef ASSERT
    // make sure we haven't made the op invalid.
    op->verify();
#endif

    // remove useless moves
    if (op->code() == lir_move) {
      assert(op->as_Op1() != NULL, "move must be LIR_Op1");
      LIR_Op1* move = (LIR_Op1*)op;
      LIR_Opr src = move->in_opr();
      LIR_Opr dst = move->result_opr();
      if (dst == src ||
          (!dst->is_pointer() && !src->is_pointer() &&
           src->is_same_register(dst))) {
        instructions->at_put(j, NULL);
        has_dead = true;
      }
    }
  }

  if (has_dead) {
    // iterate all instructions of the block and remove all null-values.
    int insert_point = 0;
    for (int j = 0; j < num_inst; j++) {
      LIR_Op* op = instructions->at(j);
      if (op != NULL) {
        if (insert_point != j) {
          instructions->at_put(insert_point, op);
        }
        insert_point++;
      }
    }
    instructions->trunc_to(insert_point);
  }
}

void LinearScan::assign_reg_num() {
  TIME_LINEAR_SCAN(timer_assign_reg_num);

  init_compute_debug_info();
  IntervalWalker* iw = init_compute_oop_maps();

  int num_blocks = block_count();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    assign_reg_num(block->lir()->instructions_list(), iw);
  }
}

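// Driver for the whole allocator; the phases below run in a fixed order:
// liveness analysis, interval construction, register allocation, data-flow
// resolution, spill-slot propagation, operand assignment, and finally FPU
// stack allocation plus the LIR peephole optimizations.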
void LinearScan::do_linear_scan() {
  NOT_PRODUCT(_total_timer.begin_method());

  number_instructions();

  NOT_PRODUCT(print_lir(1, "Before Register Allocation"));

  compute_local_live_sets();
  compute_global_live_sets();
  CHECK_BAILOUT();

  build_intervals();
  CHECK_BAILOUT();
  sort_intervals_before_allocation();

  NOT_PRODUCT(print_intervals("Before Register Allocation"));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_before_alloc));

  allocate_registers();
  CHECK_BAILOUT();

  resolve_data_flow();
  if (compilation()->has_exception_handlers()) {
    resolve_exception_handlers();
  }
  // fill in number of spill slots into frame_map
  propagate_spill_slots();
  CHECK_BAILOUT();

  NOT_PRODUCT(print_intervals("After Register Allocation"));
  NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));

  sort_intervals_after_allocation();

  DEBUG_ONLY(verify());

  eliminate_spill_moves();
  assign_reg_num();
  CHECK_BAILOUT();

  NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));

  { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);

    if (use_fpu_stack_allocation()) {
      allocate_fpu_stack(); // Only has effect on Intel
      NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
    }
  }

  { TIME_LINEAR_SCAN(timer_optimize_lir);

    EdgeMoveOptimizer::optimize(ir()->code());
    ControlFlowOptimizer::optimize(ir()->code());
    // check that cfg is still correct after optimizations
    ir()->verify();
  }

  NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
  NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
  NOT_PRODUCT(_total_timer.end_method(this));
}

// ********** Printing functions

#ifndef PRODUCT

void LinearScan::print_timers(double total) {
  _total_timer.print(total);
}

void LinearScan::print_statistics() {
  _stat_before_alloc.print("before allocation");
  _stat_after_asign.print("after assignment of register numbers");
  _stat_final.print("after optimization");
}

void LinearScan::print_bitmap(BitMap& b) {
  for (unsigned int i = 0; i < b.size(); i++) {
    if (b.at(i)) tty->print("%d ", i);
  }
  tty->cr();
}

void LinearScan::print_intervals(const char* label) {
  if (TraceLinearScanLevel >= 1) {
    int i;
    tty->cr();
    tty->print_cr("%s", label);

    for (i = 0; i < interval_count(); i++) {
      Interval* interval = interval_at(i);
      if (interval != NULL) {
        interval->print();
      }
    }

    tty->cr();
    tty->print_cr("--- Basic Blocks ---");
    for (i = 0; i < block_count(); i++) {
      BlockBegin* block = block_at(i);
      tty->print("B%d [%d, %d, %d, %d] ", block->block_id(), block->first_lir_instruction_id(), block->last_lir_instruction_id(), block->loop_index(), block->loop_depth());
    }
    tty->cr();
    tty->cr();
  }

  if (PrintCFGToFile) {
    CFGPrinter::print_intervals(&_intervals, label);
  }
}

void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
  if (TraceLinearScanLevel >= level) {
    tty->cr();
    tty->print_cr("%s", label);
    print_LIR(ir()->linear_scan_order());
    tty->cr();
  }

  if (level == 1 && PrintCFGToFile) {
    CFGPrinter::print_cfg(ir()->linear_scan_order(), label, hir_valid, true);
  }
}

#endif //PRODUCT


// ********** verification functions for allocation
// (check that all intervals have a correct register and that no registers are overwritten)
#ifdef ASSERT

void LinearScan::verify() {
  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying intervals ******************************************"));
  verify_intervals();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that no oops are in fixed intervals ****************"));
  verify_no_oops_in_fixed_intervals();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that unpinned constants are not alive across block boundaries"));
  verify_constants();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying register allocation ********************************"));
  verify_registers();

  TRACE_LINEAR_SCAN(2, tty->print_cr("********* no errors found **********************************************"));
}

void LinearScan::verify_intervals() {
  int len = interval_count();
  bool has_error = false;

  for (int i = 0; i < len; i++) {
    Interval* i1 = interval_at(i);
    if (i1 == NULL) continue;

    i1->check_split_children();

    if (i1->reg_num() != i) {
      tty->print_cr("Interval %d is on position %d in list", i1->reg_num(), i); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->reg_num() >= LIR_OprDesc::vreg_base && i1->type() == T_ILLEGAL) {
      tty->print_cr("Interval %d has no type assigned", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->assigned_reg() == any_reg) {
      tty->print_cr("Interval %d has no register assigned", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (i1->assigned_reg() == i1->assigned_regHi()) {
      tty->print_cr("Interval %d: low and high register equal", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    if (!is_processed_reg_num(i1->assigned_reg())) {
      tty->print_cr("Can not have an Interval for an ignored register"); i1->print(); tty->cr();
      has_error = true;
    }

    // special intervals that are created in MoveResolver
    // -> ignore them because the range information has no meaning there
    if (i1->from() == 1 && i1->to() == 2) continue;

    if (i1->first() == Range::end()) {
      tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
      has_error = true;
    }

    for (Range* r = i1->first(); r != Range::end(); r = r->next()) {
      if (r->from() >= r->to()) {
        tty->print_cr("Interval %d has zero length range", i1->reg_num()); i1->print(); tty->cr();
        has_error = true;
      }
    }

    for (int j = i + 1; j < len; j++) {
      Interval* i2 = interval_at(j);
      if (i2 == NULL || (i2->from() == 1 && i2->to() == 2)) continue;

      int r1 = i1->assigned_reg();
      int r1Hi = i1->assigned_regHi();
      int r2 = i2->assigned_reg();
      int r2Hi = i2->assigned_regHi();
      if ((r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi))) && i1->intersects(i2)) {
        tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
        i1->print(); tty->cr();
        i2->print(); tty->cr();
        has_error = true;
      }
    }
  }

  assert(has_error == false, "register allocation invalid");
}


void LinearScan::verify_no_oops_in_fixed_intervals() {
  Interval* fixed_intervals;
  Interval* other_intervals;
  create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);

  // to ensure that the walk reaches the last instruction id, add a dummy
  // interval with a high operation id
  other_intervals = new Interval(any_reg);
  other_intervals->add_range(max_jint - 2, max_jint - 1);
  IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);

  LIR_OpVisitState visitor;
  for (int i = 0; i < block_count(); i++) {
    BlockBegin* block = block_at(i);

    LIR_OpList* instructions = block->lir()->instructions_list();

    for (int j = 0; j < instructions->length(); j++) {
      LIR_Op* op = instructions->at(j);
      int op_id = op->id();

      visitor.visit(op);

      if (visitor.info_count() > 0) {
        iw->walk_before(op->id());
        bool check_live = true;
        if (op->code() == lir_move) {
          LIR_Op1* move = (LIR_Op1*)op;
          check_live = (move->patch_code() == lir_patch_none);
        }
        LIR_OpBranch* branch = op->as_OpBranch();
        if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
          // Don't bother checking the stub in this case since the
          // exception stub will never return to normal control flow.
          check_live = false;
        }

        // Make sure none of the fixed registers is live across an
        // oopmap since we can't handle that correctly.
        if (check_live) {
          for (Interval* interval = iw->active_first(fixedKind);
               interval != Interval::end();
               interval = interval->next()) {
            if (interval->current_to() > op->id() + 1) {
              // This interval is live out of this op so make sure
              // that this interval represents some value that's
              // referenced by this op either as an input or output.
              bool ok = false;
              for_each_visitor_mode(mode) {
                int n = visitor.opr_count(mode);
                for (int k = 0; k < n; k++) {
                  LIR_Opr opr = visitor.opr_at(mode, k);
                  if (opr->is_fixed_cpu()) {
                    if (interval_at(reg_num(opr)) == interval) {
                      ok = true;
                      break;
                    }
                    int hi = reg_numHi(opr);
                    if (hi != -1 && interval_at(hi) == interval) {
                      ok = true;
                      break;
                    }
                  }
                }
              }
              assert(ok, "fixed intervals should never be live across an oopmap point");
            }
          }
        }
      }

      // oop-maps at calls do not contain registers, so check is not needed
      if (!visitor.has_call()) {

        for_each_visitor_mode(mode) {
          int n = visitor.opr_count(mode);
          for (int k = 0; k < n; k++) {
            LIR_Opr opr = visitor.opr_at(mode, k);

            if (opr->is_fixed_cpu() && opr->is_oop()) {
              // operand is a non-virtual cpu register and contains an oop
              TRACE_LINEAR_SCAN(4, op->print_on(tty); tty->print("checking operand "); opr->print(); tty->cr());

              Interval* interval = interval_at(reg_num(opr));
              assert(interval != NULL, "no interval");

              if (mode == LIR_OpVisitState::inputMode) {
                if (interval->to() >= op_id + 1) {
                  assert(interval->to() < op_id + 2 ||
                         interval->has_hole_between(op_id, op_id + 2),
                         "oop input operand live after instruction");
                }
              } else if (mode == LIR_OpVisitState::outputMode) {
                if (interval->from() <= op_id - 1) {
                  assert(interval->has_hole_between(op_id - 1, op_id),
                         "oop output operand live before instruction");
                }
              }
            }
          }
        }
      }
    }
  }
}


void LinearScan::verify_constants() {
  int num_regs = num_virtual_regs();
  int size = live_set_size();
  int num_blocks = block_count();

  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = block_at(i);
    ResourceBitMap live_at_edge = block->live_in();

    // visit all registers where the live_at_edge bit is set
    for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
      TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));

      Value value = gen()->instruction_for_vreg(r);

      assert(value != NULL, "all intervals live across block boundaries must have Value");
      assert(value->operand()->is_register() && value->operand()->is_virtual(), "value must have virtual operand");
      assert(value->operand()->vreg_number() == r, "register number must match");
      // TKR assert(value->as_Constant() == NULL || value->is_pinned(), "only pinned constants can be alive across block boundaries");
    }
  }
}


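// RegisterVerifier simulates the register contents over the control flow
// graph: for each block it records which interval every physical register
// is expected to hold, propagates that state to all successors (including
// exception handlers) and re-visits blocks via a work list until the
// states stabilize, i.e. a forward data-flow fixpoint is reached.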
class RegisterVerifier: public StackObj {
 private:
  LinearScan*   _allocator;
  BlockList     _work_list;      // all blocks that must be processed
  IntervalsList _saved_states;   // saved information of previous check

  // simplified access to methods of LinearScan
  Compilation*  compilation() const            { return _allocator->compilation(); }
  Interval*     interval_at(int reg_num) const { return _allocator->interval_at(reg_num); }
  int           reg_num(LIR_Opr opr) const     { return _allocator->reg_num(opr); }

  // currently, only registers are processed
  int           state_size()                   { return LinearScan::nof_regs; }

  // accessors
  IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
  void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
  void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }

  // helper functions
  IntervalList* copy(IntervalList* input_state);
  void          state_put(IntervalList* input_state, int reg, Interval* interval);
  bool          check_state(IntervalList* input_state, int reg, Interval* interval);

  void process_block(BlockBegin* block);
  void process_xhandler(XHandler* xhandler, IntervalList* input_state);
  void process_successor(BlockBegin* block, IntervalList* input_state);
  void process_operations(LIR_List* ops, IntervalList* input_state);

 public:
  RegisterVerifier(LinearScan* allocator)
    : _allocator(allocator)
    , _work_list(16)
    , _saved_states(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), NULL)
  { }

  void verify(BlockBegin* start);
};


// entry function from LinearScan that starts the verification
void LinearScan::verify_registers() {
  RegisterVerifier verifier(this);
  verifier.verify(block_at(0));
}


void RegisterVerifier::verify(BlockBegin* start) {
  // setup input registers (method arguments) for first block
  int input_state_len = state_size();
  IntervalList* input_state = new IntervalList(input_state_len, input_state_len, NULL);
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  for (int n = 0; n < args->length(); n++) {
    LIR_Opr opr = args->at(n);
    if (opr->is_register()) {
      Interval* interval = interval_at(reg_num(opr));

      if (interval->assigned_reg() < state_size()) {
        input_state->at_put(interval->assigned_reg(), interval);
      }
      if (interval->assigned_regHi() != LinearScan::any_reg && interval->assigned_regHi() < state_size()) {
        input_state->at_put(interval->assigned_regHi(), interval);
      }
    }
  }

  set_state_for_block(start, input_state);
  add_to_work_list(start);

  // main loop for verification
  do {
    BlockBegin* block = _work_list.at(0);
    _work_list.remove_at(0);

    process_block(block);
  } while (!_work_list.is_empty());
}

void RegisterVerifier::process_block(BlockBegin* block) {
  TRACE_LINEAR_SCAN(2, tty->cr(); tty->print_cr("process_block B%d", block->block_id()));

  // must copy state because it is modified
  IntervalList* input_state = copy(state_for_block(block));

  if (TraceLinearScanLevel >= 4) {
    tty->print_cr("Input-State of intervals:");
    tty->print("    ");
    for (int i = 0; i < state_size(); i++) {
      if (input_state->at(i) != NULL) {
        tty->print(" %4d", input_state->at(i)->reg_num());
      } else {
        tty->print("   __");
      }
    }
    tty->cr();
    tty->cr();
  }

  // process all operations of the block
  process_operations(block->lir(), input_state);

  // iterate all successors
  for (int i = 0; i < block->number_of_sux(); i++) {
    process_successor(block->sux_at(i), input_state);
  }
}

void RegisterVerifier::process_xhandler(XHandler* xhandler, IntervalList* input_state) {
  TRACE_LINEAR_SCAN(2, tty->print_cr("process_xhandler B%d", xhandler->entry_block()->block_id()));

  // must copy state because it is modified
  input_state = copy(input_state);

  if (xhandler->entry_code() != NULL) {
    process_operations(xhandler->entry_code(), input_state);
  }
  process_successor(xhandler->entry_block(), input_state);
}

void RegisterVerifier::process_successor(BlockBegin* block, IntervalList* input_state) {
  IntervalList* saved_state = state_for_block(block);

  if (saved_state != NULL) {
    // this block was already processed before.
    // check if new input_state is consistent with saved_state

    bool saved_state_correct = true;
    for (int i = 0; i < state_size(); i++) {
      if (input_state->at(i) != saved_state->at(i)) {
        // current input_state and previous saved_state assume a different
        // interval in this register -> assume that this register is invalid
        if (saved_state->at(i) != NULL) {
          // invalidate old calculation only if it assumed that
          // register was valid. when the register was already invalid,
          // then the old calculation was correct.
          saved_state_correct = false;
          saved_state->at_put(i, NULL);

          TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
        }
      }
    }

    if (saved_state_correct) {
      // already processed block with correct input_state
      TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: previous visit already correct", block->block_id()));
    } else {
      // must re-visit this block
      TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: must re-visit because input state changed", block->block_id()));
      add_to_work_list(block);
    }

  } else {
    // block was not processed before, so set initial input_state
    TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: initial visit", block->block_id()));

    set_state_for_block(block, copy(input_state));
    add_to_work_list(block);
  }
}


IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
  IntervalList* copy_state = new IntervalList(input_state->length());
  copy_state->appendAll(input_state);
  return copy_state;
}

void RegisterVerifier::state_put(IntervalList* input_state, int reg, Interval* interval) {
  if (reg != LinearScan::any_reg && reg < state_size()) {
    if (interval != NULL) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("    reg[%d] = %d", reg, interval->reg_num()));
    } else if (input_state->at(reg) != NULL) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("    reg[%d] = NULL", reg));
    }

    input_state->at_put(reg, interval);
  }
}

bool RegisterVerifier::check_state(IntervalList* input_state, int reg, Interval* interval) {
  if (reg != LinearScan::any_reg && reg < state_size()) {
    if (input_state->at(reg) != interval) {
      tty->print_cr("!! Error in register allocation: register %d does not contain interval %d", reg, interval->reg_num());
      return true;
    }
  }
  return false;
}

void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_state) {
  // visit all instructions of the block
  LIR_OpVisitState visitor;
  bool has_error = false;

  for (int i = 0; i < ops->length(); i++) {
    LIR_Op* op = ops->at(i);
    visitor.visit(op);

    TRACE_LINEAR_SCAN(4, op->print_on(tty));

    // check if input operands are correct
    int j;
    int n = visitor.opr_count(LIR_OpVisitState::inputMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
        }

        has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
        has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());

        // When an operand is marked with is_last_use, then the fpu stack allocator
        // removes the register from the fpu stack -> the register contains no value
        if (opr->is_last_use()) {
          state_put(input_state, interval->assigned_reg(),   NULL);
          state_put(input_state, interval->assigned_regHi(), NULL);
        }
      }
    }

    // invalidate all caller save registers at calls
    if (visitor.has_call()) {
      for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
      }
      for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
      }

#ifdef X86
      int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
      for (j = 0; j < num_caller_save_xmm_regs; j++) {
        state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
      }
#endif
    }

    // process xhandler before output and temp operands
    XHandlers* xhandlers = visitor.all_xhandler();
    n = xhandlers->length();
    for (int k = 0; k < n; k++) {
      process_xhandler(xhandlers->handler_at(k), input_state);
    }

    // set temp operands (some operations use temp operands also as output operands, so can't set them NULL)
    n = visitor.opr_count(LIR_OpVisitState::tempMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::tempMode);
        }

        state_put(input_state, interval->assigned_reg(),   interval->split_parent());
        state_put(input_state, interval->assigned_regHi(), interval->split_parent());
      }
    }

    // set output operands
    n = visitor.opr_count(LIR_OpVisitState::outputMode);
    for (j = 0; j < n; j++) {
      LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, j);
      if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
        Interval* interval = interval_at(reg_num(opr));
        if (op->id() != -1) {
          interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::outputMode);
        }

        state_put(input_state, interval->assigned_reg(),   interval->split_parent());
        state_put(input_state, interval->assigned_regHi(), interval->split_parent());
      }
    }
  }
  assert(has_error == false, "Error in register allocation");
}

#endif // ASSERT



// **** Implementation of MoveResolver ******************************

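// MoveResolver turns the conceptually parallel moves required at control
// flow edges and at interval split positions into an ordered sequence of
// LIR moves. The moves are collected as (from, to) mappings and emitted
// through an insertion buffer at a previously recorded insert position.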
MoveResolver::MoveResolver(LinearScan* allocator) :
  _allocator(allocator),
  _multiple_reads_allowed(false),
  _mapping_from(8),
  _mapping_from_opr(8),
  _mapping_to(8),
  _insert_list(NULL),
  _insert_idx(-1),
  _insertion_buffer()
{
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    _register_blocked[i] = 0;
  }
  DEBUG_ONLY(check_empty());
}


#ifdef ASSERT

void MoveResolver::check_empty() {
  assert(_mapping_from.length() == 0 && _mapping_from_opr.length() == 0 && _mapping_to.length() == 0, "list must be empty before and after processing");
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    assert(register_blocked(i) == 0, "register map must be empty before and after processing");
  }
  assert(_multiple_reads_allowed == false, "must have default value");
}

void MoveResolver::verify_before_resolve() {
  assert(_mapping_from.length() == _mapping_from_opr.length(), "length must be equal");
  assert(_mapping_from.length() == _mapping_to.length(), "length must be equal");
  assert(_insert_list != NULL && _insert_idx != -1, "insert position not set");

  int i, j;
  if (!_multiple_reads_allowed) {
    for (i = 0; i < _mapping_from.length(); i++) {
      for (j = i + 1; j < _mapping_from.length(); j++) {
        assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
      }
    }
  }

  for (i = 0; i < _mapping_to.length(); i++) {
    for (j = i + 1; j < _mapping_to.length(); j++) {
      assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
    }
  }


  ResourceBitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
  if (!_multiple_reads_allowed) {
    for (i = 0; i < _mapping_from.length(); i++) {
      Interval* it = _mapping_from.at(i);
      if (it != NULL) {
        assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
        used_regs.set_bit(it->assigned_reg());

        if (it->assigned_regHi() != LinearScan::any_reg) {
          assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
          used_regs.set_bit(it->assigned_regHi());
        }
      }
    }
  }

  used_regs.clear();
  for (i = 0; i < _mapping_to.length(); i++) {
    Interval* it = _mapping_to.at(i);
    assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
    used_regs.set_bit(it->assigned_reg());

    if (it->assigned_regHi() != LinearScan::any_reg) {
      assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
      used_regs.set_bit(it->assigned_regHi());
    }
  }

  used_regs.clear();
  for (i = 0; i < _mapping_from.length(); i++) {
    Interval* it = _mapping_from.at(i);
    if (it != NULL && it->assigned_reg() >= LinearScan::nof_regs) {
      used_regs.set_bit(it->assigned_reg());
    }
  }
  for (i = 0; i < _mapping_to.length(); i++) {
    Interval* it = _mapping_to.at(i);
    assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint to _mapping_to");
  }
}

#endif // ASSERT


// mark assigned_reg and assigned_regHi of the interval as blocked
void MoveResolver::block_registers(Interval* it) {
  int reg = it->assigned_reg();
  if (reg < LinearScan::nof_regs) {
    assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
    set_register_blocked(reg, 1);
  }
  reg = it->assigned_regHi();
  if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
    assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
    set_register_blocked(reg, 1);
  }
}

// mark assigned_reg and assigned_regHi of the interval as unblocked
void MoveResolver::unblock_registers(Interval* it) {
  int reg = it->assigned_reg();
  if (reg < LinearScan::nof_regs) {
    assert(register_blocked(reg) > 0, "register already marked as unused");
    set_register_blocked(reg, -1);
  }
  reg = it->assigned_regHi();
  if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
    assert(register_blocked(reg) > 0, "register already marked as unused");
    set_register_blocked(reg, -1);
  }
}

// check if assigned_reg and assigned_regHi of the to-interval are not blocked (or only blocked by from)
bool MoveResolver::save_to_process_move(Interval* from, Interval* to) {
  int from_reg = -1;
  int from_regHi = -1;
  if (from != NULL) {
    from_reg = from->assigned_reg();
    from_regHi = from->assigned_regHi();
  }

  int reg = to->assigned_reg();
  if (reg < LinearScan::nof_regs) {
    if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
      return false;
    }
  }
  reg = to->assigned_regHi();
  if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
    if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
      return false;
    }
  }

  return true;
}


void MoveResolver::create_insertion_buffer(LIR_List* list) {
  assert(!_insertion_buffer.initialized(), "overwriting existing buffer");
  _insertion_buffer.init(list);
}

void MoveResolver::append_insertion_buffer() {
  if (_insertion_buffer.initialized()) {
    _insertion_buffer.lir_list()->append(&_insertion_buffer);
  }
  assert(!_insertion_buffer.initialized(), "must be uninitialized now");

  _insert_list = NULL;
  _insert_idx = -1;
}

void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
  assert(from_interval->reg_num() != to_interval->reg_num(), "from and to interval equal");
  assert(from_interval->type() == to_interval->type(), "move between different types");
  assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
  assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");

  LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());

  if (!_multiple_reads_allowed) {
    // the last_use flag is an optimization for FPU stack allocation. When the same
    // input interval is used in more than one move, then it is too difficult to determine
    // if this move is really the last use.
    from_opr = from_opr->make_last_use();
  }
  _insertion_buffer.move(_insert_idx, from_opr, to_opr);

  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: inserted move from register %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
}

void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
  assert(from_opr->type() == to_interval->type(), "move between different types");
  assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
  assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");

  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
  _insertion_buffer.move(_insert_idx, from_opr, to_opr);

  TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
}


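// Emits the pending moves in a safe order: a move may only be emitted when
// its target register is not blocked by a still unprocessed source. If no
// move can be emitted in a full pass, the remaining mappings contain a
// cycle (e.g. r1 -> r2 and r2 -> r1), which is broken by spilling one of
// the source intervals to a stack slot.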
void MoveResolver::resolve_mappings() {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
  DEBUG_ONLY(verify_before_resolve());

  // Block all registers that are used as input operands of a move.
  // When a register is blocked, no move to this register is emitted.
  // This is necessary for detecting cycles in moves.
  int i;
  for (i = _mapping_from.length() - 1; i >= 0; i--) {
    Interval* from_interval = _mapping_from.at(i);
    if (from_interval != NULL) {
      block_registers(from_interval);
    }
  }

  int spill_candidate = -1;
  while (_mapping_from.length() > 0) {
    bool processed_interval = false;

    for (i = _mapping_from.length() - 1; i >= 0; i--) {
      Interval* from_interval = _mapping_from.at(i);
      Interval* to_interval = _mapping_to.at(i);

      if (save_to_process_move(from_interval, to_interval)) {
        // this interval can be processed because the target is free
        if (from_interval != NULL) {
          insert_move(from_interval, to_interval);
          unblock_registers(from_interval);
        } else {
          insert_move(_mapping_from_opr.at(i), to_interval);
        }
        _mapping_from.remove_at(i);
        _mapping_from_opr.remove_at(i);
        _mapping_to.remove_at(i);

        processed_interval = true;
      } else if (from_interval != NULL && from_interval->assigned_reg() < LinearScan::nof_regs) {
        // this interval cannot be processed now because target is not free
        // it starts in a register, so it is a possible candidate for spilling
        spill_candidate = i;
      }
    }

    if (!processed_interval) {
      // no move could be processed because there is a cycle in the move list
      // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
      assert(spill_candidate != -1, "no interval in register for spilling found");

      // create a new spill interval and assign a stack slot to it
      Interval* from_interval = _mapping_from.at(spill_candidate);
      Interval* spill_interval = new Interval(-1);
      spill_interval->set_type(from_interval->type());

      // add a dummy range because real position is difficult to calculate
      // Note: this range is a special case when the integrity of the allocation is checked
      spill_interval->add_range(1, 2);

      // do not allocate a new spill slot for temporary interval, but
      // use spill slot assigned to from_interval. Otherwise moves from
      // one stack slot to another can happen (not allowed by LIR_Assembler).
      int spill_slot = from_interval->canonical_spill_slot();
      if (spill_slot < 0) {
        spill_slot = allocator()->allocate_spill_slot(type2spill_size[spill_interval->type()] == 2);
        from_interval->set_canonical_spill_slot(spill_slot);
      }
      spill_interval->assign_reg(spill_slot);
      allocator()->append_interval(spill_interval);

      TRACE_LINEAR_SCAN(4, tty->print_cr("created new Interval %d for spilling", spill_interval->reg_num()));

      // insert a move from register to stack and update the mapping
      insert_move(from_interval, spill_interval);
      _mapping_from.at_put(spill_candidate, spill_interval);
      unblock_registers(from_interval);
    }
  }

  // reset to default value
  _multiple_reads_allowed = false;

  // check that all intervals have been processed
  DEBUG_ONLY(check_empty());
}


void MoveResolver::set_insert_position(LIR_List* insert_list, int insert_idx) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: setting insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
  assert(_insert_list == NULL && _insert_idx == -1, "use move_insert_position instead of set_insert_position when data already set");

  create_insertion_buffer(insert_list);
  _insert_list = insert_list;
  _insert_idx = insert_idx;
}

void MoveResolver::move_insert_position(LIR_List* insert_list, int insert_idx) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: moving insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));

  if (_insert_list != NULL && (insert_list != _insert_list || insert_idx != _insert_idx)) {
    // insert position changed -> resolve current mappings
    resolve_mappings();
  }

  if (insert_list != _insert_list) {
    // block changed -> append insertion_buffer because it is
    // bound to a specific block and create a new insertion_buffer
    append_insertion_buffer();
    create_insertion_buffer(insert_list);
  }

  _insert_list = insert_list;
  _insert_idx = insert_idx;
}

void MoveResolver::add_mapping(Interval* from_interval, Interval* to_interval) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: adding mapping from %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));

  _mapping_from.append(from_interval);
  _mapping_from_opr.append(LIR_OprFact::illegalOpr);
  _mapping_to.append(to_interval);
}


void MoveResolver::add_mapping(LIR_Opr from_opr, Interval* to_interval) {
  TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: adding mapping from "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
  assert(from_opr->is_constant(), "only for constants");

  _mapping_from.append(NULL);
  _mapping_from_opr.append(from_opr);
  _mapping_to.append(to_interval);
}

void MoveResolver::resolve_and_append_moves() {
  if (has_mappings()) {
    resolve_mappings();
  }
  append_insertion_buffer();
}



4052 |
||
4053 |
Range::Range(int from, int to, Range* next) : |
|
4054 |
_from(from), |
|
4055 |
_to(to), |
|
4056 |
_next(next) |
|
4057 |
{ |
|
4058 |
} |
|
4059 |
||
4060 |
// initialize sentinel |
|
4061 |
Range* Range::_end = NULL; |
|
5707 | 4062 |
void Range::initialize(Arena* arena) { |
4063 |
_end = new (arena) Range(max_jint, max_jint, NULL); |
|
1 | 4064 |
} |
4065 |
||
4066 |
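// Walks both sorted range lists in parallel (like a merge step) and returns
// the first position where the two ranges overlap, or -1 if they are
// disjoint.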
int Range::intersects_at(Range* r2) const {
  const Range* r1 = this;

  assert(r1 != NULL && r2 != NULL, "null ranges not allowed");
  assert(r1 != _end && r2 != _end, "empty ranges not allowed");

  do {
    if (r1->from() < r2->from()) {
      if (r1->to() <= r2->from()) {
        r1 = r1->next(); if (r1 == _end) return -1;
      } else {
        return r2->from();
      }
    } else if (r2->from() < r1->from()) {
      if (r2->to() <= r1->from()) {
        r2 = r2->next(); if (r2 == _end) return -1;
      } else {
        return r1->from();
      }
    } else { // r1->from() == r2->from()
      if (r1->from() == r1->to()) {
        r1 = r1->next(); if (r1 == _end) return -1;
      } else if (r2->from() == r2->to()) {
        r2 = r2->next(); if (r2 == _end) return -1;
      } else {
        return r1->from();
      }
    }
  } while (true);
}

#ifndef PRODUCT
void Range::print(outputStream* out) const {
  out->print("[%d, %d[ ", _from, _to);
}
#endif



// **** Implementation of Interval **********************************

// initialize sentinel
Interval* Interval::_end = NULL;
void Interval::initialize(Arena* arena) {
  Range::initialize(arena);
  _end = new (arena) Interval(-1);
}

Interval::Interval(int reg_num) :
  _reg_num(reg_num),
  _type(T_ILLEGAL),
  _first(Range::end()),
  _use_pos_and_kinds(12),
  _current(Range::end()),
  _next(_end),
  _state(invalidState),
  _assigned_reg(LinearScan::any_reg),
  _assigned_regHi(LinearScan::any_reg),
  _cached_to(-1),
  _cached_opr(LIR_OprFact::illegalOpr),
  _cached_vm_reg(VMRegImpl::Bad()),
  _split_children(0),
  _canonical_spill_slot(-1),
  _insert_move_when_activated(false),
  _register_hint(NULL),
  _spill_state(noDefinitionFound),
  _spill_definition_pos(-1)
{
  _split_parent = this;
  _current_split_child = this;
}

int Interval::calc_to() {
  assert(_first != Range::end(), "interval has no range");

  Range* r = _first;
  while (r->next() != Range::end()) {
    r = r->next();
  }
  return r->to();
}


#ifdef ASSERT
// consistency check of split-children
void Interval::check_split_children() {
  if (_split_children.length() > 0) {
    assert(is_split_parent(), "only split parents can have children");

    for (int i = 0; i < _split_children.length(); i++) {
      Interval* i1 = _split_children.at(i);

      assert(i1->split_parent() == this, "not a split child of this interval");
      assert(i1->type() == type(), "must be equal for all split children");
      assert(i1->canonical_spill_slot() == canonical_spill_slot(), "must be equal for all split children");

      for (int j = i + 1; j < _split_children.length(); j++) {
        Interval* i2 = _split_children.at(j);

        assert(i1->reg_num() != i2->reg_num(), "same register number");

        if (i1->from() < i2->from()) {
          assert(i1->to() <= i2->from() && i1->to() < i2->to(), "intervals overlapping");
        } else {
          assert(i2->from() < i1->from(), "intervals start at same op_id");
          assert(i2->to() <= i1->from() && i2->to() < i1->to(), "intervals overlapping");
        }
      }
    }
  }
}
#endif // ASSERT

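// Returns the hint interval if it already has a register assigned; with
// search_split_child, the hint's split children are searched as well, so
// a hint can still be honored after the hinted interval has been split.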
Interval* Interval::register_hint(bool search_split_child) const {
  if (!search_split_child) {
    return _register_hint;
  }

  if (_register_hint != NULL) {
    assert(_register_hint->is_split_parent(), "only split parents are valid hint registers");

    if (_register_hint->assigned_reg() >= 0 && _register_hint->assigned_reg() < LinearScan::nof_regs) {
      return _register_hint;

    } else if (_register_hint->_split_children.length() > 0) {
      // search the first split child that has a register assigned
      int len = _register_hint->_split_children.length();
      for (int i = 0; i < len; i++) {
        Interval* cur = _register_hint->_split_children.at(i);

        if (cur->assigned_reg() >= 0 && cur->assigned_reg() < LinearScan::nof_regs) {
          return cur;
        }
      }
    }
  }

  // no hint interval found that has a register assigned
  return NULL;
}

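// Returns the split child that covers op_id in the given mode. The matching
// child is moved to the front of _split_children, so repeated queries for
// neighboring op_ids (the common access pattern during register number
// assignment) hit on the first loop iteration.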
Interval* Interval::split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode) {
  assert(is_split_parent(), "can only be called for split parents");
  assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");

  Interval* result;
  if (_split_children.length() == 0) {
    result = this;
  } else {
    result = NULL;
    int len = _split_children.length();

    // in outputMode, the end of the interval (op_id == cur->to()) is not valid
    int to_offset = (mode == LIR_OpVisitState::outputMode ? 0 : 1);

    int i;
    for (i = 0; i < len; i++) {
      Interval* cur = _split_children.at(i);
      if (cur->from() <= op_id && op_id < cur->to() + to_offset) {
        if (i > 0) {
          // exchange current split child to start of list (faster access for next call)
          _split_children.at_put(i, _split_children.at(0));
          _split_children.at_put(0, cur);
        }

        // interval found
        result = cur;
        break;
      }
    }

#ifdef ASSERT
    for (i = 0; i < len; i++) {
      Interval* tmp = _split_children.at(i);
      if (tmp != result && tmp->from() <= op_id && op_id < tmp->to() + to_offset) {
        tty->print_cr("two valid result intervals found for op_id %d: %d and %d", op_id, result->reg_num(), tmp->reg_num());
        result->print();
        tmp->print();
        assert(false, "two valid result intervals found");
      }
    }
#endif
  }

  assert(result != NULL, "no matching interval found");
  assert(result->covers(op_id, mode), "op_id not covered by interval");

  return result;
}

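// Note on the at_put() swap in split_child_at_op_id() above: the matched
// child is moved to the front of _split_children so that the next query,
// which typically asks for the same or a nearby op_id, succeeds on its first
// probe. This is a move-to-front cache, not a sorted structure. Example of
// the to_offset boundary (hypothetical ids): a child with from() == 8 and
// to() == 16 matches op_id 16 in inputMode (to_offset == 1) but not in
// outputMode (to_offset == 0), where a definition at 16 belongs to the next child.
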
// returns the last split child that ends before the given op_id
Interval* Interval::split_child_before_op_id(int op_id) {
  assert(op_id >= 0, "invalid op_id");

  Interval* parent = split_parent();
  Interval* result = NULL;

  int len = parent->_split_children.length();
  assert(len > 0, "no split children available");

  for (int i = len - 1; i >= 0; i--) {
    Interval* cur = parent->_split_children.at(i);
    if (cur->to() <= op_id && (result == NULL || result->to() < cur->to())) {
      result = cur;
    }
  }

  assert(result != NULL, "no split child found");
  return result;
}


// checks if op_id is covered by any split child
bool Interval::split_child_covers(int op_id, LIR_OpVisitState::OprMode mode) {
  assert(is_split_parent(), "can only be called for split parents");
  assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");

  if (_split_children.length() == 0) {
    // simple case if interval was not split
    return covers(op_id, mode);

  } else {
    // extended case: check all split children
    int len = _split_children.length();
    for (int i = 0; i < len; i++) {
      Interval* cur = _split_children.at(i);
      if (cur->covers(op_id, mode)) {
        return true;
      }
    }
    return false;
  }
}


// Note: use positions are sorted descending -> first use has highest index
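// Illustration (hypothetical values): after add_use_pos(40, loopEndMarker),
// add_use_pos(20, shouldHaveRegister) and add_use_pos(8, mustHaveRegister),
// the flat pair list is [40, L, 20, S, 8, M]; walking from the highest index
// down to 0 therefore visits positions in ascending order (8, 20, 40), which
// is why the first use in time is found at the highest index.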
int Interval::first_usage(IntervalUseKind min_use_kind) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}

int Interval::next_usage(IntervalUseKind min_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}

int Interval::next_usage_exact(IntervalUseKind exact_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
      return _use_pos_and_kinds.at(i);
    }
  }
  return max_jint;
}

int Interval::previous_usage(IntervalUseKind min_use_kind, int from) const {
  assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");

  int prev = 0;
  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    if (_use_pos_and_kinds.at(i) > from) {
      return prev;
    }
    if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
      prev = _use_pos_and_kinds.at(i);
    }
  }
  return prev;
}

void Interval::add_use_pos(int pos, IntervalUseKind use_kind) {
  assert(covers(pos, LIR_OpVisitState::inputMode), "use position not covered by live range");

  // do not add use positions for precolored intervals because
  // they are never used
  if (use_kind != noUse && reg_num() >= LIR_OprDesc::vreg_base) {
#ifdef ASSERT
    assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
    for (int i = 0; i < _use_pos_and_kinds.length(); i += 2) {
      assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
      assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
      if (i > 0) {
        assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
      }
    }
#endif

    // Note: add_use is called in descending order, so list gets sorted
    // automatically by just appending new use positions
    int len = _use_pos_and_kinds.length();
    if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
      _use_pos_and_kinds.append(pos);
      _use_pos_and_kinds.append(use_kind);
    } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
      assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
      _use_pos_and_kinds.at_put(len - 1, use_kind);
    }
  }
}

void Interval::add_range(int from, int to) {
  assert(from < to, "invalid range");
  assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
  assert(from <= first()->to(), "not inserting at begin of interval");

  if (first()->from() <= to) {
    // join intersecting ranges
    first()->set_from(MIN2(from, first()->from()));
    first()->set_to (MAX2(to, first()->to()));
  } else {
    // insert new range
    _first = new Range(from, to, first());
  }
}

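// Example for add_range() above (hypothetical positions): ranges are added
// back to front, so with first() == [20, 30] a call add_range(10, 18)
// prepends a new range [10, 18], while add_range(10, 22) merges into the
// single range [10, 30].
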
Interval* Interval::new_split_child() {
  // allocate new interval
  Interval* result = new Interval(-1);
  result->set_type(type());

  Interval* parent = split_parent();
  result->_split_parent = parent;
  result->set_register_hint(parent);

  // insert new interval in children-list of parent
  if (parent->_split_children.length() == 0) {
    assert(is_split_parent(), "list must be initialized at first split");

    parent->_split_children = IntervalList(4);
    parent->_split_children.append(this);
  }
  parent->_split_children.append(result);

  return result;
}

// split this interval at the specified position and return
// the remainder as a new interval.
//
// when an interval is split, a bi-directional link is established between the original interval
// (the split parent) and the intervals that are split off this interval (the split children).
// When a split child is split again, the newly created interval is also a direct child
// of the original parent (there is no tree of split children stored, but a flat list).
// All split children are spilled to the same stack slot (stored in _canonical_spill_slot)
//
// Note: The new interval has no valid reg_num
Interval* Interval::split(int split_pos) {
  assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");

  // allocate new interval
  Interval* result = new_split_child();

  // split the ranges
  Range* prev = NULL;
  Range* cur = _first;
  while (cur != Range::end() && cur->to() <= split_pos) {
    prev = cur;
    cur = cur->next();
  }
  assert(cur != Range::end(), "split interval after end of last range");

  if (cur->from() < split_pos) {
    result->_first = new Range(split_pos, cur->to(), cur->next());
    cur->set_to(split_pos);
    cur->set_next(Range::end());

  } else {
    assert(prev != NULL, "split before start of first range");
    result->_first = cur;
    prev->set_next(Range::end());
  }
  result->_current = result->_first;
  _cached_to = -1; // clear cached value

  // split list of use positions
  int total_len = _use_pos_and_kinds.length();
  int start_idx = total_len - 2;
  while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
    start_idx -= 2;
  }

  intStack new_use_pos_and_kinds(total_len - start_idx);
  int i;
  for (i = start_idx + 2; i < total_len; i++) {
    new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
  }

  _use_pos_and_kinds.trunc_to(start_idx + 2);
  result->_use_pos_and_kinds = _use_pos_and_kinds;
  _use_pos_and_kinds = new_use_pos_and_kinds;

#ifdef ASSERT
  assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
  assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
  assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");

  for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
    assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
    assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
  }
  for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
    assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
    assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
  }
#endif

  return result;
}

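// Example for split() above (hypothetical positions): with ranges [10, 20]
// and [30, 40], split(14) leaves this interval with [10, 14] and gives the
// returned split child [14, 20] and [30, 40]; use positions below 14 stay
// here, those at or above 14 move to the child (checked by the asserts).
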
// split this interval at the specified position and return
// the head as a new interval (the original interval is the tail)
//
// Currently, only the first range can be split, and the new interval
// must not have split positions
Interval* Interval::split_from_start(int split_pos) {
  assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
  assert(split_pos > from() && split_pos < to(), "can only split inside interval");
  assert(split_pos > _first->from() && split_pos <= _first->to(), "can only split inside first range");
  assert(first_usage(noUse) > split_pos, "can not split when use positions are present");

  // allocate new interval
  Interval* result = new_split_child();

  // the newly created interval has only one range (checked by assertion above),
  // so the splitting of the ranges is very simple
  result->add_range(_first->from(), split_pos);

  if (split_pos == _first->to()) {
    assert(_first->next() != Range::end(), "must not be at end");
    _first = _first->next();
  } else {
    _first->set_from(split_pos);
  }

  return result;
}


// returns true if the op_id is inside the interval
bool Interval::covers(int op_id, LIR_OpVisitState::OprMode mode) const {
  Range* cur = _first;

  while (cur != Range::end() && cur->to() < op_id) {
    cur = cur->next();
  }
  if (cur != Range::end()) {
    assert(cur->to() != cur->next()->from(), "ranges not separated");

    if (mode == LIR_OpVisitState::outputMode) {
      return cur->from() <= op_id && op_id < cur->to();
    } else {
      return cur->from() <= op_id && op_id <= cur->to();
    }
  }
  return false;
}

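// Example for covers() above (hypothetical range [10, 16]): covers(16, mode)
// is true in inputMode, because an operand may still be read at the last
// op_id of its range, but false in outputMode, because a value defined at
// op_id 16 already belongs to the following lifetime.
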
// returns true if the interval has any hole between hole_from and hole_to
// (even if the hole has only the length 1)
bool Interval::has_hole_between(int hole_from, int hole_to) {
  assert(hole_from < hole_to, "check");
  assert(from() <= hole_from && hole_to <= to(), "index out of interval");

  Range* cur = _first;
  while (cur != Range::end()) {
    assert(cur->to() < cur->next()->from(), "no space between ranges");

    // hole-range starts before this range -> hole
    if (hole_from < cur->from()) {
      return true;

    // hole-range completely inside this range -> no hole
    } else if (hole_to <= cur->to()) {
      return false;

    // overlapping of hole-range with this range -> hole
    } else if (hole_from <= cur->to()) {
      return true;
    }

    cur = cur->next();
  }

  return false;
}
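
// Example for has_hole_between() above (hypothetical ranges [10, 20] and
// [30, 40]): has_hole_between(22, 26) and has_hole_between(18, 32) return
// true, while has_hole_between(12, 18) returns false.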

#ifndef PRODUCT
void Interval::print(outputStream* out) const {
  const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
  const char* UseKind2Name[] = { "N", "L", "S", "M" };

  const char* type_name;
  LIR_Opr opr = LIR_OprFact::illegal();
  if (reg_num() < LIR_OprDesc::vreg_base) {
    type_name = "fixed";
    // need a temporary operand for fixed intervals because type() cannot be called
#ifdef X86
    int last_xmm_reg = pd_last_xmm_reg;
#ifdef _LP64
    if (UseAVX < 3) {
      last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
    }
#endif
#endif
    if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
      opr = LIR_OprFact::single_cpu(assigned_reg());
    } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
      opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
#ifdef X86
    } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= last_xmm_reg) {
      opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
    } else {
      ShouldNotReachHere();
    }
  } else {
    type_name = type2name(type());
    if (assigned_reg() != -1 &&
        (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
      opr = LinearScan::calc_operand_for_interval(this);
    }
  }

  out->print("%d %s ", reg_num(), type_name);
  if (opr->is_valid()) {
    out->print("\"");
    opr->print(out);
    out->print("\" ");
  }
  out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));

  // print ranges
  Range* cur = _first;
  while (cur != Range::end()) {
    cur->print(out);
    cur = cur->next();
    assert(cur != NULL, "range list not closed with range sentinel");
  }

  // print use positions
  int prev = 0;
  assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
  for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
    assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
    assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");

    out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
    prev = _use_pos_and_kinds.at(i);
  }

  out->print(" \"%s\"", SpillState2Name[spill_state()]);
  out->cr();
}
#endif


// **** Implementation of IntervalWalker ****************************

IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
 : _compilation(allocator->compilation())
 , _allocator(allocator)
{
  _unhandled_first[fixedKind] = unhandled_fixed_first;
  _unhandled_first[anyKind]   = unhandled_any_first;
  _active_first[fixedKind]    = Interval::end();
  _inactive_first[fixedKind]  = Interval::end();
  _active_first[anyKind]      = Interval::end();
  _inactive_first[anyKind]    = Interval::end();
  _current_position = -1;
  _current = NULL;
  next_interval();
}


// append interval at top of list
void IntervalWalker::append_unsorted(Interval** list, Interval* interval) {
  interval->set_next(*list); *list = interval;
}


// append interval in order of current range from()
void IntervalWalker::append_sorted(Interval** list, Interval* interval) {
  Interval* prev = NULL;
  Interval* cur  = *list;
  while (cur->current_from() < interval->current_from()) {
    prev = cur; cur = cur->next();
  }
  if (prev == NULL) {
    *list = interval;
  } else {
    prev->set_next(interval);
  }
  interval->set_next(cur);
}

void IntervalWalker::append_to_unhandled(Interval** list, Interval* interval) {
  assert(interval->from() >= current()->current_from(), "cannot append new interval before current walk position");

  Interval* prev = NULL;
  Interval* cur  = *list;
  while (cur->from() < interval->from() || (cur->from() == interval->from() && cur->first_usage(noUse) < interval->first_usage(noUse))) {
    prev = cur; cur = cur->next();
  }
  if (prev == NULL) {
    *list = interval;
  } else {
    prev->set_next(interval);
  }
  interval->set_next(cur);
}


inline bool IntervalWalker::remove_from_list(Interval** list, Interval* i) {
  while (*list != Interval::end() && *list != i) {
    list = (*list)->next_addr();
  }
  if (*list != Interval::end()) {
    assert(*list == i, "check");
    *list = (*list)->next();
    return true;
  } else {
    return false;
  }
}

void IntervalWalker::remove_from_list(Interval* i) {
  bool deleted;

  if (i->state() == activeState) {
    deleted = remove_from_list(active_first_addr(anyKind), i);
  } else {
    assert(i->state() == inactiveState, "invalid state");
    deleted = remove_from_list(inactive_first_addr(anyKind), i);
  }

  assert(deleted, "interval has not been found in list");
}


void IntervalWalker::walk_to(IntervalState state, int from) {
  assert (state == activeState || state == inactiveState, "wrong state");
  for_each_interval_kind(kind) {
    Interval** prev = state == activeState ? active_first_addr(kind) : inactive_first_addr(kind);
    Interval* next = *prev;
    while (next->current_from() <= from) {
      Interval* cur = next;
      next = cur->next();

      bool range_has_changed = false;
      while (cur->current_to() <= from) {
        cur->next_range();
        range_has_changed = true;
      }

      // also handle move from inactive list to active list
      range_has_changed = range_has_changed || (state == inactiveState && cur->current_from() <= from);

      if (range_has_changed) {
        // remove cur from list
        *prev = next;
        if (cur->current_at_end()) {
          // move to handled state (not maintained as a list)
          cur->set_state(handledState);
          interval_moved(cur, kind, state, handledState);
        } else if (cur->current_from() <= from) {
          // sort into active list
          append_sorted(active_first_addr(kind), cur);
          cur->set_state(activeState);
          if (*prev == cur) {
            assert(state == activeState, "check");
            prev = cur->next_addr();
          }
          interval_moved(cur, kind, state, activeState);
        } else {
          // sort into inactive list
          append_sorted(inactive_first_addr(kind), cur);
          cur->set_state(inactiveState);
          if (*prev == cur) {
            assert(state == inactiveState, "check");
            prev = cur->next_addr();
          }
          interval_moved(cur, kind, state, inactiveState);
        }
      } else {
        prev = cur->next_addr();
        continue;
      }
    }
  }
}

void IntervalWalker::next_interval() {
  IntervalKind kind;
  Interval* any   = _unhandled_first[anyKind];
  Interval* fixed = _unhandled_first[fixedKind];

  if (any != Interval::end()) {
    // intervals may start at same position -> prefer fixed interval
    kind = fixed != Interval::end() && fixed->from() <= any->from() ? fixedKind : anyKind;

    assert (kind == fixedKind && fixed->from() <= any->from() ||
            kind == anyKind && any->from() <= fixed->from(), "wrong interval!!!");
    assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");

  } else if (fixed != Interval::end()) {
    kind = fixedKind;
  } else {
    _current = NULL; return;
  }
  _current_kind = kind;
  _current = _unhandled_first[kind];
  _unhandled_first[kind] = _current->next();
  _current->set_next(Interval::end());
  _current->rewind_range();
}


void IntervalWalker::walk_to(int lir_op_id) {
  assert(_current_position <= lir_op_id, "can not walk backwards");
  while (current() != NULL) {
    bool is_active = current()->from() <= lir_op_id;
    int id = is_active ? current()->from() : lir_op_id;

    TRACE_LINEAR_SCAN(2, if (_current_position < id) { tty->cr(); tty->print_cr("walk_to(%d) **************************************************************", id); })

    // set _current_position prior to call of walk_to
    _current_position = id;

    // call walk_to even if _current_position == id
    walk_to(activeState, id);
    walk_to(inactiveState, id);

    if (is_active) {
      current()->set_state(activeState);
      if (activate_current()) {
        append_sorted(active_first_addr(current_kind()), current());
        interval_moved(current(), current_kind(), unhandledState, activeState);
      }

      next_interval();
    } else {
      return;
    }
  }
}

void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
#ifndef PRODUCT
  if (TraceLinearScanLevel >= 4) {
#define print_state(state) \
    switch(state) {\
      case unhandledState: tty->print("unhandled"); break;\
      case activeState: tty->print("active"); break;\
      case inactiveState: tty->print("inactive"); break;\
      case handledState: tty->print("handled"); break;\
      default: ShouldNotReachHere(); \
    }

    print_state(from); tty->print(" to "); print_state(to);
    tty->fill_to(23);
    interval->print();

#undef print_state
  }
#endif
}



// **** Implementation of LinearScanWalker **************************

LinearScanWalker::LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
 : IntervalWalker(allocator, unhandled_fixed_first, unhandled_any_first)
 , _move_resolver(allocator)
{
  for (int i = 0; i < LinearScan::nof_regs; i++) {
    _spill_intervals[i] = new IntervalList(2);
  }
}


inline void LinearScanWalker::init_use_lists(bool only_process_use_pos) {
  for (int i = _first_reg; i <= _last_reg; i++) {
    _use_pos[i] = max_jint;

    if (!only_process_use_pos) {
      _block_pos[i] = max_jint;
      _spill_intervals[i]->clear();
    }
  }
}

inline void LinearScanWalker::exclude_from_use(int reg) {
  assert(reg < LinearScan::nof_regs, "interval must have a register assigned (stack slots not allowed)");
  if (reg >= _first_reg && reg <= _last_reg) {
    _use_pos[reg] = 0;
  }
}
inline void LinearScanWalker::exclude_from_use(Interval* i) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");

  exclude_from_use(i->assigned_reg());
  exclude_from_use(i->assigned_regHi());
}

inline void LinearScanWalker::set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos) {
  assert(use_pos != 0, "must use exclude_from_use to set use_pos to 0");

  if (reg >= _first_reg && reg <= _last_reg) {
    if (_use_pos[reg] > use_pos) {
      _use_pos[reg] = use_pos;
    }
    if (!only_process_use_pos) {
      _spill_intervals[reg]->append(i);
    }
  }
}
inline void LinearScanWalker::set_use_pos(Interval* i, int use_pos, bool only_process_use_pos) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");
  if (use_pos != -1) {
    set_use_pos(i->assigned_reg(), i, use_pos, only_process_use_pos);
    set_use_pos(i->assigned_regHi(), i, use_pos, only_process_use_pos);
  }
}

inline void LinearScanWalker::set_block_pos(int reg, Interval* i, int block_pos) {
  if (reg >= _first_reg && reg <= _last_reg) {
    if (_block_pos[reg] > block_pos) {
      _block_pos[reg] = block_pos;
    }
    if (_use_pos[reg] > block_pos) {
      _use_pos[reg] = block_pos;
    }
  }
}
inline void LinearScanWalker::set_block_pos(Interval* i, int block_pos) {
  assert(i->assigned_reg() != any_reg, "interval has no register assigned");
  if (block_pos != -1) {
    set_block_pos(i->assigned_reg(), i, block_pos);
    set_block_pos(i->assigned_regHi(), i, block_pos);
  }
}


void LinearScanWalker::free_exclude_active_fixed() {
  Interval* list = active_first(fixedKind);
  while (list != Interval::end()) {
    assert(list->assigned_reg() < LinearScan::nof_regs, "active interval must have a register assigned");
    exclude_from_use(list);
    list = list->next();
  }
}

void LinearScanWalker::free_exclude_active_any() {
  Interval* list = active_first(anyKind);
  while (list != Interval::end()) {
    exclude_from_use(list);
    list = list->next();
  }
}

void LinearScanWalker::free_collect_inactive_fixed(Interval* cur) {
  Interval* list = inactive_first(fixedKind);
  while (list != Interval::end()) {
    if (cur->to() <= list->current_from()) {
      assert(list->current_intersects_at(cur) == -1, "must not intersect");
      set_use_pos(list, list->current_from(), true);
    } else {
      set_use_pos(list, list->current_intersects_at(cur), true);
    }
    list = list->next();
  }
}

void LinearScanWalker::free_collect_inactive_any(Interval* cur) {
  Interval* list = inactive_first(anyKind);
  while (list != Interval::end()) {
    set_use_pos(list, list->current_intersects_at(cur), true);
    list = list->next();
  }
}

void LinearScanWalker::free_collect_unhandled(IntervalKind kind, Interval* cur) {
  Interval* list = unhandled_first(kind);
  while (list != Interval::end()) {
    set_use_pos(list, list->intersects_at(cur), true);
    if (kind == fixedKind && cur->to() <= list->from()) {
      set_use_pos(list, list->from(), true);
    }
    list = list->next();
  }
}

void LinearScanWalker::spill_exclude_active_fixed() {
  Interval* list = active_first(fixedKind);
  while (list != Interval::end()) {
    exclude_from_use(list);
    list = list->next();
  }
}

void LinearScanWalker::spill_block_unhandled_fixed(Interval* cur) {
  Interval* list = unhandled_first(fixedKind);
  while (list != Interval::end()) {
    set_block_pos(list, list->intersects_at(cur));
    list = list->next();
  }
}

void LinearScanWalker::spill_block_inactive_fixed(Interval* cur) {
  Interval* list = inactive_first(fixedKind);
  while (list != Interval::end()) {
    if (cur->to() > list->current_from()) {
      set_block_pos(list, list->current_intersects_at(cur));
    } else {
      assert(list->current_intersects_at(cur) == -1, "invalid optimization: intervals intersect");
    }

    list = list->next();
  }
}

void LinearScanWalker::spill_collect_active_any() {
  Interval* list = active_first(anyKind);
  while (list != Interval::end()) {
    set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
    list = list->next();
  }
}

void LinearScanWalker::spill_collect_inactive_any(Interval* cur) {
  Interval* list = inactive_first(anyKind);
  while (list != Interval::end()) {
    if (list->current_intersects(cur)) {
      set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
    }
    list = list->next();
  }
}

void LinearScanWalker::insert_move(int op_id, Interval* src_it, Interval* dst_it) {
  // output all moves here. When source and target are equal, the move is
  // optimized away later in assign_reg_nums

  op_id = (op_id + 1) & ~1;
  BlockBegin* op_block = allocator()->block_of_op_with_id(op_id);
  assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");

  // calculate index of instruction inside instruction list of current block
  // the minimal index (for a block with no spill moves) can be calculated because the
  // numbering of instructions is known.
  // When the block already contains spill moves, the index must be increased until the
  // correct index is reached.
  LIR_OpList* list = op_block->lir()->instructions_list();
  int index = (op_id - list->at(0)->id()) / 2;
  assert(list->at(index)->id() <= op_id, "error in calculation");

  while (list->at(index)->id() != op_id) {
    index++;
    assert(0 <= index && index < list->length(), "index out of bounds");
  }
  assert(1 <= index && index < list->length(), "index out of bounds");
  assert(list->at(index)->id() == op_id, "error in calculation");

  // insert new instruction before instruction at position index
  _move_resolver.move_insert_position(op_block->lir(), index - 1);
  _move_resolver.add_mapping(src_it, dst_it);
}

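// Worked example for insert_move() above (hypothetical ids): op_ids are even
// and consecutive within a block, so op_id 107 is first rounded up to 108 by
// (op_id + 1) & ~1; if the block's first instruction has id 100, the minimal
// index guess is (108 - 100) / 2 = 4. Spill moves already inserted into the
// list only push the target instruction to a higher index, so the while loop
// walks forward from that guess.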

int LinearScanWalker::find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos) {
  int from_block_nr = min_block->linear_scan_number();
  int to_block_nr = max_block->linear_scan_number();

  assert(0 <= from_block_nr && from_block_nr < block_count(), "out of range");
  assert(0 <= to_block_nr && to_block_nr < block_count(), "out of range");
  assert(from_block_nr < to_block_nr, "must cross block boundary");

  // Try to split at end of max_block. If this would be after
  // max_split_pos, then use the begin of max_block
  int optimal_split_pos = max_block->last_lir_instruction_id() + 2;
  if (optimal_split_pos > max_split_pos) {
    optimal_split_pos = max_block->first_lir_instruction_id();
  }

  int min_loop_depth = max_block->loop_depth();
  for (int i = to_block_nr - 1; i >= from_block_nr; i--) {
    BlockBegin* cur = block_at(i);

    if (cur->loop_depth() < min_loop_depth) {
      // block with lower loop-depth found -> split at the end of this block
      min_loop_depth = cur->loop_depth();
      optimal_split_pos = cur->last_lir_instruction_id() + 2;
    }
  }
  assert(optimal_split_pos > allocator()->max_lir_op_id() || allocator()->is_block_begin(optimal_split_pos), "algorithm must move split pos to block boundary");

  return optimal_split_pos;
}

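// Example for find_optimal_split_pos() above (hypothetical blocks): if the
// candidate range spans B1 (loop_depth 0), B2 (loop_depth 1) and B3
// (loop_depth 1, the max_block), the scan prefers the end of B1, so the
// split (and any resulting spill move) lands outside the loop rather than
// inside it.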

int LinearScanWalker::find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization) {
  int optimal_split_pos = -1;
  if (min_split_pos == max_split_pos) {
    // trivial case, no optimization of split position possible
    TRACE_LINEAR_SCAN(4, tty->print_cr(" min-pos and max-pos are equal, no optimization possible"));
    optimal_split_pos = min_split_pos;

  } else {
    assert(min_split_pos < max_split_pos, "must be true then");
    assert(min_split_pos > 0, "cannot access min_split_pos - 1 otherwise");

    // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
    // beginning of a block, then min_split_pos is also a possible split position.
    // Use the block before as min_block, because then min_block->last_lir_instruction_id() + 2 == min_split_pos
    BlockBegin* min_block = allocator()->block_of_op_with_id(min_split_pos - 1);

    // reason for using max_split_pos - 1: otherwise there would be an assertion failure
    // when an interval ends at the end of the last block of the method
    // (in this case, max_split_pos == allocator()->max_lir_op_id() + 2, and there is no
    // block at this op_id)
    BlockBegin* max_block = allocator()->block_of_op_with_id(max_split_pos - 1);

    assert(min_block->linear_scan_number() <= max_block->linear_scan_number(), "invalid order");
    if (min_block == max_block) {
      // split position cannot be moved to block boundary, so split as late as possible
      TRACE_LINEAR_SCAN(4, tty->print_cr(" cannot move split pos to block boundary because min_pos and max_pos are in same block"));
      optimal_split_pos = max_split_pos;

    } else if (it->has_hole_between(max_split_pos - 1, max_split_pos) && !allocator()->is_block_begin(max_split_pos)) {
      // Do not move split position if the interval has a hole before max_split_pos.
      // Intervals resulting from Phi-Functions have more than one definition (marked
      // as mustHaveRegister) with a hole before each definition. When the register is needed
      // for the second definition, an earlier reloading is unnecessary.
      TRACE_LINEAR_SCAN(4, tty->print_cr(" interval has hole just before max_split_pos, so splitting at max_split_pos"));
      optimal_split_pos = max_split_pos;

    } else {
      // search optimal block boundary between min_split_pos and max_split_pos
      TRACE_LINEAR_SCAN(4, tty->print_cr(" moving split pos to optimal block boundary between block B%d and B%d", min_block->block_id(), max_block->block_id()));

      if (do_loop_optimization) {
        // Loop optimization: if a loop-end marker is found between min- and max-position,
        // then split before this loop
        int loop_end_pos = it->next_usage_exact(loopEndMarker, min_block->last_lir_instruction_id() + 2);
        TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization: loop end found at pos %d", loop_end_pos));

        assert(loop_end_pos > min_split_pos, "invalid order");
        if (loop_end_pos < max_split_pos) {
          // loop-end marker found between min- and max-position
          // if it is not the end marker for the same loop as the min-position, then move
          // the max-position to this loop block.
          // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
          // of the interval (normally, only mustHaveRegister causes a reloading)
          BlockBegin* loop_block = allocator()->block_of_op_with_id(loop_end_pos);

          TRACE_LINEAR_SCAN(4, tty->print_cr(" interval is used in loop that ends in block B%d, so trying to move max_block back from B%d to B%d", loop_block->block_id(), max_block->block_id(), loop_block->block_id()));
          assert(loop_block != min_block, "loop_block and min_block must be different because block boundary is needed between");

          optimal_split_pos = find_optimal_split_pos(min_block, loop_block, loop_block->last_lir_instruction_id() + 2);
          if (optimal_split_pos == loop_block->last_lir_instruction_id() + 2) {
            optimal_split_pos = -1;
            TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization not necessary"));
          } else {
            TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization successful"));
          }
        }
      }

      if (optimal_split_pos == -1) {
        // not calculated by loop optimization
        optimal_split_pos = find_optimal_split_pos(min_block, max_block, max_split_pos);
      }
    }
  }
  TRACE_LINEAR_SCAN(4, tty->print_cr(" optimal split position: %d", optimal_split_pos));

  return optimal_split_pos;
}

/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos in two parts:
  1) the left part has already a location assigned
  2) the right part is sorted into the unhandled-list
*/
void LinearScanWalker::split_before_usage(Interval* it, int min_split_pos, int max_split_pos) {
  TRACE_LINEAR_SCAN(2, tty->print ("----- splitting interval: "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print_cr(" between %d and %d", min_split_pos, max_split_pos));

  assert(it->from() < min_split_pos, "cannot split at start of interval");
  assert(current_position() < min_split_pos, "cannot split before current position");
  assert(min_split_pos <= max_split_pos, "invalid order");
  assert(max_split_pos <= it->to(), "cannot split after end of interval");

  int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, true);

  assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
  assert(optimal_split_pos <= it->to(), "cannot split after end of interval");
  assert(optimal_split_pos > it->from(), "cannot split at start of interval");

  if (optimal_split_pos == it->to() && it->next_usage(mustHaveRegister, min_split_pos) == max_jint) {
    // the split position would be just before the end of the interval
    // -> no split at all necessary
    TRACE_LINEAR_SCAN(4, tty->print_cr(" no split necessary because optimal split position is at end of interval"));
    return;
  }

  // must calculate this before the actual split is performed and before split position is moved to odd op_id
  bool move_necessary = !allocator()->is_block_begin(optimal_split_pos) && !it->has_hole_between(optimal_split_pos - 1, optimal_split_pos);

  if (!allocator()->is_block_begin(optimal_split_pos)) {
    // move position before actual instruction (odd op_id)
    optimal_split_pos = (optimal_split_pos - 1) | 1;
  }

  TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
  assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
  assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");

  Interval* split_part = it->split(optimal_split_pos);

  allocator()->append_interval(split_part);
  allocator()->copy_register_flags(it, split_part);
  split_part->set_insert_move_when_activated(move_necessary);
  append_to_unhandled(unhandled_first_addr(anyKind), split_part);

  TRACE_LINEAR_SCAN(2, tty->print_cr(" split interval in two parts (insert_move_when_activated: %d)", move_necessary));
  TRACE_LINEAR_SCAN(2, tty->print (" "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print (" "); split_part->print());
}

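// Example for the odd-position rounding in split_before_usage() above
// (hypothetical ids): instructions sit at even op_ids, so a split position
// of 10 becomes (10 - 1) | 1 == 9 and a position of 9 stays 9; the split is
// thereby placed between two instructions rather than on one.
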
/*
  split an interval at the optimal position between min_split_pos and
  max_split_pos in two parts:
  1) the left part has already a location assigned
  2) the right part is always on the stack and therefore ignored in further processing
*/
void LinearScanWalker::split_for_spilling(Interval* it) {
  // calculate allowed range of splitting position
  int max_split_pos = current_position();
  int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, max_split_pos) + 1, it->from());

  TRACE_LINEAR_SCAN(2, tty->print ("----- splitting and spilling interval: "); it->print());
  TRACE_LINEAR_SCAN(2, tty->print_cr(" between %d and %d", min_split_pos, max_split_pos));

  assert(it->state() == activeState, "why spill interval that is not active?");
  assert(it->from() <= min_split_pos, "cannot split before start of interval");
  assert(min_split_pos <= max_split_pos, "invalid order");
  assert(max_split_pos < it->to(), "cannot split at end of interval");
  assert(current_position() < it->to(), "interval must not end before current position");

  if (min_split_pos == it->from()) {
    // the whole interval is never used, so spill it entirely to memory
    TRACE_LINEAR_SCAN(2, tty->print_cr(" spilling entire interval because split pos is at beginning of interval"));
    assert(it->first_usage(shouldHaveRegister) > current_position(), "interval must not have use position before current_position");

    allocator()->assign_spill_slot(it);
    allocator()->change_spill_state(it, min_split_pos);

    // Also kick parent intervals out of register to memory when they have no use
    // position. This avoids short interval in register surrounded by intervals in
    // memory -> avoid useless moves from memory to register and back
    Interval* parent = it;
    while (parent != NULL && parent->is_split_child()) {
      parent = parent->split_child_before_op_id(parent->from());

      if (parent->assigned_reg() < LinearScan::nof_regs) {
        if (parent->first_usage(shouldHaveRegister) == max_jint) {
          // parent is never used, so kick it out of its assigned register
          TRACE_LINEAR_SCAN(4, tty->print_cr(" kicking out interval %d out of its register because it is never used", parent->reg_num()));
          allocator()->assign_spill_slot(parent);
        } else {
          // do not go further back because the register is actually used by the interval
          parent = NULL;
        }
      }
    }

  } else {
    // search optimal split pos, split interval and spill only the right hand part
    int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, false);

    assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
    assert(optimal_split_pos < it->to(), "cannot split at end of interval");
    assert(optimal_split_pos >= it->from(), "cannot split before start of interval");

    if (!allocator()->is_block_begin(optimal_split_pos)) {
      // move position before actual instruction (odd op_id)
      optimal_split_pos = (optimal_split_pos - 1) | 1;
    }

    TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
    assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
    assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");

    Interval* spilled_part = it->split(optimal_split_pos);
    allocator()->append_interval(spilled_part);
    allocator()->assign_spill_slot(spilled_part);
    allocator()->change_spill_state(spilled_part, optimal_split_pos);

    if (!allocator()->is_block_begin(optimal_split_pos)) {
      TRACE_LINEAR_SCAN(4, tty->print_cr(" inserting move from interval %d to %d", it->reg_num(), spilled_part->reg_num()));
      insert_move(optimal_split_pos, it, spilled_part);
    }

    // the current_split_child is needed later when moves are inserted for reloading
    assert(spilled_part->current_split_child() == it, "overwriting wrong current_split_child");
    spilled_part->make_current_split_child();

    TRACE_LINEAR_SCAN(2, tty->print_cr(" split interval in two parts"));
    TRACE_LINEAR_SCAN(2, tty->print (" "); it->print());
    TRACE_LINEAR_SCAN(2, tty->print (" "); spilled_part->print());
  }
}


void LinearScanWalker::split_stack_interval(Interval* it) {
  int min_split_pos = current_position() + 1;
  int max_split_pos = MIN2(it->first_usage(shouldHaveRegister), it->to());

  split_before_usage(it, min_split_pos, max_split_pos);
}

void LinearScanWalker::split_when_partial_register_available(Interval* it, int register_available_until) {
  int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, register_available_until), it->from() + 1);
  int max_split_pos = register_available_until;

  split_before_usage(it, min_split_pos, max_split_pos);
}

void LinearScanWalker::split_and_spill_interval(Interval* it) {
  assert(it->state() == activeState || it->state() == inactiveState, "other states not allowed");

  int current_pos = current_position();
  if (it->state() == inactiveState) {
    // the interval is currently inactive, so no spill slot is needed for now.
    // when the split part is activated, the interval has a new chance to get a register,
    // so in the best case no stack slot is necessary
    assert(it->has_hole_between(current_pos - 1, current_pos + 1), "interval can not be inactive otherwise");
    split_before_usage(it, current_pos + 1, current_pos + 1);

  } else {
    // search the position where the interval must have a register and split
    // at the optimal position before.
    // The newly created part is added to the unhandled list and will get a register
    // when it is activated
    int min_split_pos = current_pos + 1;
    int max_split_pos = MIN2(it->next_usage(mustHaveRegister, min_split_pos), it->to());

    split_before_usage(it, min_split_pos, max_split_pos);

    assert(it->next_usage(mustHaveRegister, current_pos) == max_jint, "the remaining part is spilled to stack and therefore has no register");
    split_for_spilling(it);
  }
}


int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
  int min_full_reg = any_reg;
  int max_partial_reg = any_reg;

  for (int i = _first_reg; i <= _last_reg; i++) {
    if (i == ignore_reg) {
      // this register must be ignored

    } else if (_use_pos[i] >= interval_to) {
      // this register is free for the full interval
      if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
        min_full_reg = i;
      }
    } else if (_use_pos[i] > reg_needed_until) {
      // this register is at least free until reg_needed_until
      if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
        max_partial_reg = i;
      }
    }
  }

  if (min_full_reg != any_reg) {
    return min_full_reg;
  } else if (max_partial_reg != any_reg) {
    *need_split = true;
    return max_partial_reg;
  } else {
    return any_reg;
  }
}

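// Example for find_free_reg() above (hypothetical _use_pos values): with an
// interval ending at 40 and _use_pos = {r1: 100, r2: 60, r3: 20} and no
// hint, both r1 and r2 are free for the whole interval and the smaller
// use_pos (r2) wins, leaving r1 available for longer-lived intervals; if no
// register were free up to 40, the one free longest would be returned with
// *need_split set.
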
int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
  assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");

  int min_full_reg = any_reg;
  int max_partial_reg = any_reg;

  for (int i = _first_reg; i < _last_reg; i += 2) {
    if (_use_pos[i] >= interval_to && _use_pos[i + 1] >= interval_to) {
      // this register is free for the full interval
      if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
        min_full_reg = i;
      }
    } else if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
      // this register is at least free until reg_needed_until
      if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
        max_partial_reg = i;
      }
    }
  }

  if (min_full_reg != any_reg) {
    return min_full_reg;
  } else if (max_partial_reg != any_reg) {
    *need_split = true;
    return max_partial_reg;
  } else {
    return any_reg;
  }
}


5407 |
bool LinearScanWalker::alloc_free_reg(Interval* cur) { |
|
5408 |
TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print()); |
|
5409 |
||
5410 |
init_use_lists(true); |
|
5411 |
free_exclude_active_fixed(); |
|
5412 |
free_exclude_active_any(); |
|
5413 |
free_collect_inactive_fixed(cur); |
|
5414 |
free_collect_inactive_any(cur); |
|
5415 |
// free_collect_unhandled(fixedKind, cur); |
|
5416 |
assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"); |
|
5417 |
||
5418 |
// _use_pos contains the start of the next interval that has this register assigned |
|
5419 |
// (either as a fixed register or a normal allocated register in the past) |
|
5420 |
// only intervals overlapping with cur are processed, non-overlapping invervals can be ignored safely |
|
5421 |
TRACE_LINEAR_SCAN(4, tty->print_cr(" state of registers:")); |
|
5422 |
TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr(" reg %d: use_pos: %d", i, _use_pos[i])); |
|
5423 |
||
5424 |
int hint_reg, hint_regHi; |
|
5425 |
Interval* register_hint = cur->register_hint(); |
|
5426 |
if (register_hint != NULL) { |
|
5427 |
hint_reg = register_hint->assigned_reg(); |
|
5428 |
hint_regHi = register_hint->assigned_regHi(); |
|
5429 |
||
5430 |
if (allocator()->is_precolored_cpu_interval(register_hint)) { |
|
5431 |
assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals"); |
|
5432 |
hint_regHi = hint_reg + 1; // connect e.g. eax-edx |
|
5433 |
} |
|
5434 |
TRACE_LINEAR_SCAN(4, tty->print(" hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print()); |
|
5435 |
||
5436 |
} else { |
|
5437 |
hint_reg = any_reg; |
|
5438 |
hint_regHi = any_reg; |
|
5439 |
} |
|
5440 |
assert(hint_reg == any_reg || hint_reg != hint_regHi, "hint reg and regHi equal"); |
|
5441 |
assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned to interval"); |
|
5442 |
||
5443 |
// the register must be free at least until this position |
|
5444 |
int reg_needed_until = cur->from() + 1; |
|
5445 |
int interval_to = cur->to(); |
|
5446 |
||
5447 |
bool need_split = false; |
|
5448 |
int split_pos = -1; |
|
5449 |
int reg = any_reg; |
|
5450 |
int regHi = any_reg; |
|
5451 |
||
5452 |
if (_adjacent_regs) { |
|
5453 |
reg = find_free_double_reg(reg_needed_until, interval_to, hint_reg, &need_split); |
|
5454 |
regHi = reg + 1; |
|
5455 |
if (reg == any_reg) { |
|
5456 |
return false; |
|
5457 |
} |
|
5458 |
split_pos = MIN2(_use_pos[reg], _use_pos[regHi]); |
|
5459 |
||
5460 |
} else { |
|
5461 |
reg = find_free_reg(reg_needed_until, interval_to, hint_reg, any_reg, &need_split); |
|
5462 |
if (reg == any_reg) { |
|
5463 |
return false; |
|
5464 |
} |
|
5465 |
split_pos = _use_pos[reg]; |
|
5466 |
||
5467 |
if (_num_phys_regs == 2) { |
|
5468 |
regHi = find_free_reg(reg_needed_until, interval_to, hint_regHi, reg, &need_split); |
|
5469 |
||
5470 |
if (_use_pos[reg] < interval_to && regHi == any_reg) { |
|
5471 |
// do not split interval if only one register can be assigned until the split pos |
|
5472 |
// (when one register is found for the whole interval, split&spill is only |
|
5473 |
// performed for the hi register) |
|
5474 |
return false; |
|
5475 |
||
5476 |
} else if (regHi != any_reg) { |
|
5477 |
split_pos = MIN2(split_pos, _use_pos[regHi]); |
|
5478 |
||
5479 |
// sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax |
|
5480 |
if (reg > regHi) { |
|
5481 |
int temp = reg; |
|
5482 |
reg = regHi; |
|
5483 |
regHi = temp; |
|
5484 |
} |
|
5485 |
} |
|
5486 |
} |
|
5487 |
} |
|
5488 |
||
5489 |
cur->assign_reg(reg, regHi); |
|
5490 |
TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi)); |
|
5491 |
||
5492 |
assert(split_pos > 0, "invalid split_pos"); |
|
5493 |
if (need_split) { |
|
5494 |
// register not available for full interval, so split it |
|
5495 |
split_when_partial_register_available(cur, split_pos); |
|
5496 |
} |
|
5497 |
||
5498 |
// only return true if interval is completely assigned |
|
5499 |
return _num_phys_regs == 1 || regHi != any_reg; |
|
5500 |
} |
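
// Illustration (standalone sketch, hypothetical names; not HotSpot API): the
// decision alloc_free_reg makes for a single candidate register, reduced to
// plain ints. A register whose next blocking use lies beyond the interval's
// end can hold the interval completely; one that is free only for a prefix
// forces a split at the position where the register becomes blocked.
static bool sketch_try_free_reg(int next_use_of_reg, int interval_from, int interval_to,
                                bool* need_split, int* split_pos) {
  if (next_use_of_reg <= interval_from + 1) {
    return false;                   // not free even until the first needed position
  }
  if (next_use_of_reg < interval_to) {
    *need_split = true;             // free only for a prefix of the interval
    *split_pos = next_use_of_reg;   // split where the next blocking use begins
  }
  return true;
}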


int LinearScanWalker::find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
  int max_reg = any_reg;

  for (int i = _first_reg; i <= _last_reg; i++) {
    if (i == ignore_reg) {
      // this register must be ignored

    } else if (_use_pos[i] > reg_needed_until) {
      if (max_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_reg] && max_reg != hint_reg)) {
        max_reg = i;
      }
    }
  }

  if (max_reg != any_reg && _block_pos[max_reg] <= interval_to) {
    *need_split = true;
  }

  return max_reg;
}
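
// Illustration (standalone sketch, hypothetical names): the victim-selection
// rule used by find_locked_reg. The hint register wins outright; otherwise the
// register whose next use is farthest away is picked, so that evicting it
// frees a register for the longest possible time.
static int sketch_pick_victim(const int* use_pos, int num_regs, int hint_reg, int min_needed) {
  int best = -1;
  for (int i = 0; i < num_regs; i++) {
    if (use_pos[i] <= min_needed) continue;   // unusable even for the minimum span
    if (best == -1 || i == hint_reg || (use_pos[i] > use_pos[best] && best != hint_reg)) {
      best = i;
    }
  }
  return best;  // -1 means no candidate at all
}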

int LinearScanWalker::find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
  assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");

  int max_reg = any_reg;

  for (int i = _first_reg; i < _last_reg; i += 2) {
    if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
      if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
        max_reg = i;
      }
    }
  }

  if (max_reg != any_reg &&
      (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to)) {
    *need_split = true;
  }

  return max_reg;
}
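
// Illustration (standalone sketch, hypothetical names): the pair scan above,
// assuming the register file is walked in aligned even/odd pairs. A pair
// qualifies only if both halves stay usable past the minimum position.
static int sketch_pick_victim_pair(const int* use_pos, int num_regs, int min_needed) {
  int best = -1;
  for (int i = 0; i + 1 < num_regs; i += 2) {
    if (use_pos[i] > min_needed && use_pos[i + 1] > min_needed) {
      if (best == -1 || use_pos[i] > use_pos[best]) {
        best = i;
      }
    }
  }
  return best;  // -1 means no pair available
}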

void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
  assert(reg != any_reg, "no register assigned");

  for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
    Interval* it = _spill_intervals[reg]->at(i);
    remove_from_list(it);
    split_and_spill_interval(it);
  }

  if (regHi != any_reg) {
    IntervalList* processed = _spill_intervals[reg];
    for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
      Interval* it = _spill_intervals[regHi]->at(i);
      if (processed->find(it) == -1) {
        remove_from_list(it);
        split_and_spill_interval(it);
      }
    }
  }
}


// Split an Interval and spill it to memory so that cur can be placed in a register
void LinearScanWalker::alloc_locked_reg(Interval* cur) {
  TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());

  // collect current usage of registers
  init_use_lists(false);
  spill_exclude_active_fixed();
  // spill_block_unhandled_fixed(cur);
  assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
  spill_block_inactive_fixed(cur);
  spill_collect_active_any();
  spill_collect_inactive_any(cur);

#ifndef PRODUCT
  if (TraceLinearScanLevel >= 4) {
    tty->print_cr("      state of registers:");
    for (int i = _first_reg; i <= _last_reg; i++) {
      tty->print("      reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
      for (int j = 0; j < _spill_intervals[i]->length(); j++) {
        tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
      }
      tty->cr();
    }
  }
#endif

  // the register must be free at least until this position
  int reg_needed_until = MIN2(cur->first_usage(mustHaveRegister), cur->from() + 1);
  int interval_to = cur->to();
  assert(reg_needed_until > 0 && reg_needed_until < max_jint, "interval has no use");

  int split_pos = 0;
  int use_pos = 0;
  bool need_split = false;
  int reg, regHi;

  if (_adjacent_regs) {
    reg = find_locked_double_reg(reg_needed_until, interval_to, any_reg, &need_split);
    regHi = reg + 1;

    if (reg != any_reg) {
      use_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
      split_pos = MIN2(_block_pos[reg], _block_pos[regHi]);
    }
  } else {
    reg = find_locked_reg(reg_needed_until, interval_to, any_reg, cur->assigned_reg(), &need_split);
    regHi = any_reg;

    if (reg != any_reg) {
      use_pos = _use_pos[reg];
      split_pos = _block_pos[reg];

      if (_num_phys_regs == 2) {
        if (cur->assigned_reg() != any_reg) {
          regHi = reg;
          reg = cur->assigned_reg();
        } else {
          regHi = find_locked_reg(reg_needed_until, interval_to, any_reg, reg, &need_split);
          if (regHi != any_reg) {
            use_pos = MIN2(use_pos, _use_pos[regHi]);
            split_pos = MIN2(split_pos, _block_pos[regHi]);
          }
        }

        if (regHi != any_reg && reg > regHi) {
          // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
          int temp = reg;
          reg = regHi;
          regHi = temp;
        }
      }
    }
  }

  if (reg == any_reg || (_num_phys_regs == 2 && regHi == any_reg) || use_pos <= cur->first_usage(mustHaveRegister)) {
    // the first use of cur is later than the spilling position -> spill cur
    TRACE_LINEAR_SCAN(4, tty->print_cr("able to spill current interval. first_usage(register): %d, use_pos: %d", cur->first_usage(mustHaveRegister), use_pos));

    if (cur->first_usage(mustHaveRegister) <= cur->from() + 1) {
      assert(false, "cannot spill interval that is used in first instruction (possible reason: no register found)");
      // assign a reasonable register and do a bailout in product mode to avoid errors
      allocator()->assign_spill_slot(cur);
      BAILOUT("LinearScan: no register found");
    }

    split_and_spill_interval(cur);
  } else {
    TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
    assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
    assert(split_pos > 0, "invalid split_pos");
    assert(need_split == false || split_pos > cur->from(), "splitting interval at from");

    cur->assign_reg(reg, regHi);
    if (need_split) {
      // register not available for full interval, so split it
      split_when_partial_register_available(cur, split_pos);
    }

    // perform splitting and spilling for all affected intervals
    split_and_spill_intersecting_intervals(reg, regHi);
  }
}
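
// Illustration (standalone sketch, plain ints, hypothetical names): the
// spill-versus-evict rule at the heart of alloc_locked_reg. If even the best
// victim is needed again no later than cur's own first mandatory use, evicting
// it gains nothing, so cur itself is split and spilled instead.
static bool sketch_spill_cur_instead_of_victim(int victim_next_use, int cur_first_mandatory_use) {
  return victim_next_use <= cur_first_mandatory_use;
}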

bool LinearScanWalker::no_allocation_possible(Interval* cur) {
#ifdef X86
  // fast calculation of intervals that can never get a register because the
  // next instruction is a call that blocks all registers
  // Note: this does not work if callee-saved registers are available (e.g. on Sparc)

  // check if this interval is the result of a split operation
  // (an interval got a register until this position)
  int pos = cur->from();
  if ((pos & 1) == 1) {
    // the current instruction is a call that blocks all registers
    if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));

      // safety check that there is really no register available
      assert(alloc_free_reg(cur) == false, "found a register for this interval");
      return true;
    }
  }
#endif
  return false;
}
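
// Illustration (sketch of the op-id convention assumed above): LIR operations
// are numbered in steps of two, so even positions are real instructions and an
// odd from() can only arise from splitting an interval in the middle of an op.
static bool sketch_starts_between_instructions(int interval_from) {
  return (interval_from & 1) == 1;  // odd -> created by splitting, not by a definition
}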

void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
  BasicType type = cur->type();
  _num_phys_regs = LinearScan::num_physical_regs(type);
  _adjacent_regs = LinearScan::requires_adjacent_regs(type);

  if (pd_init_regs_for_alloc(cur)) {
    // the appropriate register range was selected.
  } else if (type == T_FLOAT || type == T_DOUBLE) {
    _first_reg = pd_first_fpu_reg;
    _last_reg = pd_last_fpu_reg;
  } else {
    _first_reg = pd_first_cpu_reg;
    _last_reg = FrameMap::last_cpu_reg();
  }

  assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
  assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
}


bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
  if (op->code() != lir_move) {
    return false;
  }
  assert(op->as_Op1() != NULL, "move must be LIR_Op1");

  LIR_Opr in = ((LIR_Op1*)op)->in_opr();
  LIR_Opr res = ((LIR_Op1*)op)->result_opr();
  return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
}

// optimization (especially for phi functions of nested loops):
// assign same spill slot to non-intersecting intervals
void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
  if (cur->is_split_child()) {
    // optimization is only suitable for split parents
    return;
  }

  Interval* register_hint = cur->register_hint(false);
  if (register_hint == NULL) {
    // cur is not the target of a move, otherwise register_hint would be set
    return;
  }
  assert(register_hint->is_split_parent(), "register hint must be split parent");

  if (cur->spill_state() != noOptimization || register_hint->spill_state() != noOptimization) {
    // combining the stack slots for intervals where spill move optimization is applied
    // is not beneficial and would cause problems
    return;
  }

  int begin_pos = cur->from();
  int end_pos = cur->to();
  if (end_pos > allocator()->max_lir_op_id() || (begin_pos & 1) != 0 || (end_pos & 1) != 0) {
    // safety check that lir_op_with_id is allowed
    return;
  }

  if (!is_move(allocator()->lir_op_with_id(begin_pos), register_hint, cur) || !is_move(allocator()->lir_op_with_id(end_pos), cur, register_hint)) {
    // cur and register_hint are not connected with two moves
    return;
  }

  Interval* begin_hint = register_hint->split_child_at_op_id(begin_pos, LIR_OpVisitState::inputMode);
  Interval* end_hint = register_hint->split_child_at_op_id(end_pos, LIR_OpVisitState::outputMode);
  if (begin_hint == end_hint || begin_hint->to() != begin_pos || end_hint->from() != end_pos) {
    // register_hint must be split, otherwise the re-writing of use positions does not work
    return;
  }

  assert(begin_hint->assigned_reg() != any_reg, "must have register assigned");
  assert(end_hint->assigned_reg() == any_reg, "must not have register assigned");
  assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
  assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at end of interval because of move");

  if (begin_hint->assigned_reg() < LinearScan::nof_regs) {
    // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
    return;
  }
  assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");

  // modify intervals such that cur gets the same stack slot as register_hint
  // delete use positions to prevent the intervals from getting a register at the beginning
  cur->set_canonical_spill_slot(register_hint->canonical_spill_slot());
  cur->remove_first_use_pos();
  end_hint->remove_first_use_pos();
}
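
// Illustration (standalone sketch, hypothetical types): the shape matched by
// combine_spilled_intervals. The optimization only fires when cur is bracketed
// by a move in from the hint at its start and a move back out at its end,
// i.e. cur is a round-trip copy of the hint (typical for phis of nested loops).
struct SketchMove { int from_vreg; int to_vreg; };
static bool sketch_is_copy_round_trip(SketchMove at_begin, SketchMove at_end,
                                      int hint_vreg, int cur_vreg) {
  return at_begin.from_vreg == hint_vreg && at_begin.to_vreg == cur_vreg &&
         at_end.from_vreg   == cur_vreg  && at_end.to_vreg   == hint_vreg;
}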


// allocate a physical register or memory location to an interval
bool LinearScanWalker::activate_current() {
  Interval* cur = current();
  bool result = true;

  TRACE_LINEAR_SCAN(2, tty->print("+++++ activating interval "); cur->print());
  TRACE_LINEAR_SCAN(4, tty->print_cr("      split_parent: %d, insert_move_when_activated: %d", cur->split_parent()->reg_num(), cur->insert_move_when_activated()));

  if (cur->assigned_reg() >= LinearScan::nof_regs) {
    // activating an interval that has a stack slot assigned -> split it at first use position
    // used for method parameters
    TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has spill slot assigned (method parameter) -> split it before first use"));

    split_stack_interval(cur);
    result = false;

  } else if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::must_start_in_memory)) {
    // activating an interval that must start in a stack slot, but may get a register later
    // used for lir_roundfp: rounding is done by store to stack and reload later
    TRACE_LINEAR_SCAN(4, tty->print_cr("      interval must start in stack slot -> split it before first use"));
    assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned");

    allocator()->assign_spill_slot(cur);
    split_stack_interval(cur);
    result = false;

  } else if (cur->assigned_reg() == any_reg) {
    // interval has no assigned register -> normal allocation
    // (this is the normal case for most intervals)
    TRACE_LINEAR_SCAN(4, tty->print_cr("      normal allocation of register"));

    // assign same spill slot to non-intersecting intervals
    combine_spilled_intervals(cur);

    init_vars_for_alloc(cur);
    if (no_allocation_possible(cur) || !alloc_free_reg(cur)) {
      // no empty register available.
      // split and spill another interval so that this interval gets a register
      alloc_locked_reg(cur);
    }

    // spilled intervals need not be moved to the active list
    if (cur->assigned_reg() >= LinearScan::nof_regs) {
      result = false;
    }
  }

  // load spilled values that become active from stack slot to register
  if (cur->insert_move_when_activated()) {
    assert(cur->is_split_child(), "must be");
    assert(cur->current_split_child() != NULL, "must be");
    assert(cur->current_split_child()->reg_num() != cur->reg_num(), "cannot insert move between same interval");
    TRACE_LINEAR_SCAN(4, tty->print_cr("Inserting move from interval %d to %d because insert_move_when_activated is set", cur->current_split_child()->reg_num(), cur->reg_num()));

    insert_move(cur->from(), cur->current_split_child(), cur);
  }
  cur->make_current_split_child();

  return result; // true = interval is moved to active list
}


// Implementation of EdgeMoveOptimizer

EdgeMoveOptimizer::EdgeMoveOptimizer() :
  _edge_instructions(4),
  _edge_instructions_idx(4)
{
}

void EdgeMoveOptimizer::optimize(BlockList* code) {
  EdgeMoveOptimizer optimizer = EdgeMoveOptimizer();

  // ignore the first block in the list (index 0 is not processed)
  for (int i = code->length() - 1; i >= 1; i--) {
    BlockBegin* block = code->at(i);

    if (block->number_of_preds() > 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
      optimizer.optimize_moves_at_block_end(block);
    }
    if (block->number_of_sux() == 2) {
      optimizer.optimize_moves_at_block_begin(block);
    }
  }
}


// clear all internal data structures
void EdgeMoveOptimizer::init_instructions() {
  _edge_instructions.clear();
  _edge_instructions_idx.clear();
}

// append a lir-instruction-list and the index of the current operation into the list
void EdgeMoveOptimizer::append_instructions(LIR_OpList* instructions, int instructions_idx) {
  _edge_instructions.append(instructions);
  _edge_instructions_idx.append(instructions_idx);
}

// return the current operation of the given edge (predecessor or successor)
LIR_Op* EdgeMoveOptimizer::instruction_at(int edge) {
  LIR_OpList* instructions = _edge_instructions.at(edge);
  int idx = _edge_instructions_idx.at(edge);

  if (idx < instructions->length()) {
    return instructions->at(idx);
  } else {
    return NULL;
  }
}

// removes the current operation of the given edge (predecessor or successor)
void EdgeMoveOptimizer::remove_cur_instruction(int edge, bool decrement_index) {
  LIR_OpList* instructions = _edge_instructions.at(edge);
  int idx = _edge_instructions_idx.at(edge);
  instructions->remove_at(idx);

  if (decrement_index) {
    _edge_instructions_idx.at_put(edge, idx - 1);
  }
}


bool EdgeMoveOptimizer::operations_different(LIR_Op* op1, LIR_Op* op2) {
  if (op1 == NULL || op2 == NULL) {
    // at least one block is already empty -> no optimization possible
    return true;
  }

  if (op1->code() == lir_move && op2->code() == lir_move) {
    assert(op1->as_Op1() != NULL, "move must be LIR_Op1");
    assert(op2->as_Op1() != NULL, "move must be LIR_Op1");
    LIR_Op1* move1 = (LIR_Op1*)op1;
    LIR_Op1* move2 = (LIR_Op1*)op2;
    if (move1->info() == move2->info() && move1->in_opr() == move2->in_opr() && move1->result_opr() == move2->result_opr()) {
      // these moves are exactly equal and can be optimized
      return false;
    }

  } else if (op1->code() == lir_fxch && op2->code() == lir_fxch) {
    assert(op1->as_Op1() != NULL, "fxch must be LIR_Op1");
    assert(op2->as_Op1() != NULL, "fxch must be LIR_Op1");
    LIR_Op1* fxch1 = (LIR_Op1*)op1;
    LIR_Op1* fxch2 = (LIR_Op1*)op2;
    if (fxch1->in_opr()->as_jint() == fxch2->in_opr()->as_jint()) {
      // equal FPU stack operations can be optimized
      return false;
    }

  } else if (op1->code() == lir_fpop_raw && op2->code() == lir_fpop_raw) {
    // equal FPU stack operations can be optimized
    return false;
  }

  // no optimization possible
  return true;
}
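
// Illustration (standalone sketch, hypothetical types): the structural
// equality test behind operations_different for the move case. Two moves are
// mergeable only when opcode, source, destination and debug info all match.
struct SketchOp { int code; int in_opr; int result_opr; const void* info; };
static bool sketch_moves_mergeable(const SketchOp* a, const SketchOp* b) {
  return a != NULL && b != NULL &&
         a->code == b->code && a->info == b->info &&
         a->in_opr == b->in_opr && a->result_opr == b->result_opr;
}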

void EdgeMoveOptimizer::optimize_moves_at_block_end(BlockBegin* block) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));

  if (block->is_predecessor(block)) {
    // currently we can't handle this correctly.
    return;
  }

  init_instructions();
  int num_preds = block->number_of_preds();
  assert(num_preds > 1, "do not call otherwise");
  assert(!block->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");

  // setup a list with the lir-instructions of all predecessors
  int i;
  for (i = 0; i < num_preds; i++) {
    BlockBegin* pred = block->pred_at(i);
    LIR_OpList* pred_instructions = pred->lir()->instructions_list();

    if (pred->number_of_sux() != 1) {
      // this can happen with switch-statements where multiple edges are between
      // the same blocks.
      return;
    }

    assert(pred->number_of_sux() == 1, "can handle only one successor");
    assert(pred->sux_at(0) == block, "invalid control flow");
    assert(pred_instructions->last()->code() == lir_branch, "block with successor must end with branch");
    assert(pred_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
    assert(pred_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");

    if (pred_instructions->last()->info() != NULL) {
      // can not optimize instructions when debug info is needed
      return;
    }

    // ignore the unconditional branch at the end of the block
    append_instructions(pred_instructions, pred_instructions->length() - 2);
  }


  // process lir-instructions while all predecessors end with the same instruction
  while (true) {
    LIR_Op* op = instruction_at(0);
    for (i = 1; i < num_preds; i++) {
      if (operations_different(op, instruction_at(i))) {
        // these instructions are different and cannot be optimized ->
        // no further optimization possible
        return;
      }
    }

    TRACE_LINEAR_SCAN(4, tty->print("found instruction that is equal in all %d predecessors: ", num_preds); op->print());

    // insert the instruction at the beginning of the current block
    block->lir()->insert_before(1, op);

    // delete the instruction at the end of all predecessors
    for (i = 0; i < num_preds; i++) {
      remove_cur_instruction(i, true);
    }
  }
}
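
// Illustration (standalone sketch, ints standing in for instructions,
// hypothetical names): the common-suffix hoisting loop above. As long as every
// predecessor ends with the same instruction, it is emitted once in the merge
// block and deleted from all predecessors; the successor-side pass below does
// the mirror image with a common prefix. hoisted[] is filled last-to-first,
// so re-inserting each entry at the head of the block restores program order.
static int sketch_hoist_common_suffix(int** preds, int* lengths, int num_preds,
                                      int* hoisted, int capacity) {
  int count = 0;
  while (count < capacity && lengths[0] > 0) {
    int candidate = preds[0][lengths[0] - 1];
    for (int i = 1; i < num_preds; i++) {
      if (lengths[i] == 0 || preds[i][lengths[i] - 1] != candidate) {
        return count;               // suffixes diverge -> done
      }
    }
    for (int i = 0; i < num_preds; i++) {
      lengths[i]--;                 // delete from every predecessor
    }
    hoisted[count++] = candidate;   // insert once at the head of the block
  }
  return count;
}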


void EdgeMoveOptimizer::optimize_moves_at_block_begin(BlockBegin* block) {
  TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at begin of block B%d", block->block_id()));

  init_instructions();
  int num_sux = block->number_of_sux();

  LIR_OpList* cur_instructions = block->lir()->instructions_list();

  assert(num_sux == 2, "method should not be called otherwise");
  assert(cur_instructions->last()->code() == lir_branch, "block with successor must end with branch");
  assert(cur_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
  assert(cur_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");

  if (cur_instructions->last()->info() != NULL) {
    // cannot optimize instructions when debug info is needed
    return;
  }

  LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
  if (branch->info() != NULL || (branch->code() != lir_branch && branch->code() != lir_cond_float_branch)) {
    // not a valid case for optimization
    // currently, only blocks that end with two branches (conditional branch followed
    // by unconditional branch) are optimized
    return;
  }

  // now it is guaranteed that the block ends with two branch instructions.
  // the instructions are inserted at the end of the block before these two branches
  int insert_idx = cur_instructions->length() - 2;

  int i;
#ifdef ASSERT
  for (i = insert_idx - 1; i >= 0; i--) {
    LIR_Op* op = cur_instructions->at(i);
    if ((op->code() == lir_branch || op->code() == lir_cond_float_branch) && ((LIR_OpBranch*)op)->block() != NULL) {
      assert(false, "block with two successors can have only two branch instructions");
    }
  }
#endif

  // setup a list with the lir-instructions of all successors
  for (i = 0; i < num_sux; i++) {
    BlockBegin* sux = block->sux_at(i);
    LIR_OpList* sux_instructions = sux->lir()->instructions_list();

    assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");

    if (sux->number_of_preds() != 1) {
      // this can happen with switch-statements where multiple edges are between
      // the same blocks.
      return;
    }
    assert(sux->pred_at(0) == block, "invalid control flow");
    assert(!sux->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");

    // ignore the label at the beginning of the block
    append_instructions(sux_instructions, 1);
  }

  // process lir-instructions while all successors begin with the same instruction
  while (true) {
    LIR_Op* op = instruction_at(0);
    for (i = 1; i < num_sux; i++) {
      if (operations_different(op, instruction_at(i))) {
        // these instructions are different and cannot be optimized ->
        // no further optimization possible
        return;
      }
    }

    TRACE_LINEAR_SCAN(4, tty->print("----- found instruction that is equal in all %d successors: ", num_sux); op->print());

    // insert instruction at end of current block
    block->lir()->insert_before(insert_idx, op);
    insert_idx++;

    // delete the instructions at the beginning of all successors
    for (i = 0; i < num_sux; i++) {
      remove_cur_instruction(i, false);
    }
  }
}


// Implementation of ControlFlowOptimizer

ControlFlowOptimizer::ControlFlowOptimizer() :
  _original_preds(4)
{
}

void ControlFlowOptimizer::optimize(BlockList* code) {
  ControlFlowOptimizer optimizer = ControlFlowOptimizer();

  // push the OSR entry block to the end so that we're not jumping over it.
  BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
  if (osr_entry) {
    int index = osr_entry->linear_scan_number();
    assert(code->at(index) == osr_entry, "wrong index");
    code->remove_at(index);
    code->append(osr_entry);
  }

  optimizer.reorder_short_loops(code);
  optimizer.delete_empty_blocks(code);
  optimizer.delete_unnecessary_jumps(code);
  optimizer.delete_jumps_to_return(code);
}

void ControlFlowOptimizer::reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx) {
  int i = header_idx + 1;
  int max_end = MIN2(header_idx + ShortLoopSize, code->length());
  while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
    i++;
  }

  if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
    int end_idx = i - 1;
    BlockBegin* end_block = code->at(end_idx);

    if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
      // short loop from header_idx to end_idx found -> reorder blocks such that
      // the header_block is the last block instead of the first block of the loop
      TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
                                         end_idx - header_idx + 1,
                                         header_block->block_id(), end_block->block_id()));

      for (int j = header_idx; j < end_idx; j++) {
        code->at_put(j, code->at(j + 1));
      }
      code->at_put(end_idx, header_block);

      // correct the flags so that any loop alignment occurs in the right place.
      assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
      code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
      code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
    }
  }
}

void ControlFlowOptimizer::reorder_short_loops(BlockList* code) {
  for (int i = code->length() - 1; i >= 0; i--) {
    BlockBegin* block = code->at(i);

    if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
      reorder_short_loop(code, block, i);
    }
  }

  DEBUG_ONLY(verify(code));
}
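
// Illustration (standalone sketch, ints standing in for blocks): the rotation
// performed by reorder_short_loop. The header is moved from the front of the
// loop to the back, shifting the body up by one, so the loop closes with a
// single backward branch out of the (now trailing) header instead of a jump
// around it on every iteration.
static void sketch_rotate_header_to_end(int* blocks, int header_idx, int end_idx) {
  int header = blocks[header_idx];
  for (int j = header_idx; j < end_idx; j++) {
    blocks[j] = blocks[j + 1];      // shift loop body blocks up
  }
  blocks[end_idx] = header;         // header becomes the last block of the loop
}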

// only blocks with exactly one successor can be deleted. Such blocks
// must always end with an unconditional branch to this successor
bool ControlFlowOptimizer::can_delete_block(BlockBegin* block) {
  if (block->number_of_sux() != 1 || block->number_of_exception_handlers() != 0 || block->is_entry_block()) {
    return false;
  }

  LIR_OpList* instructions = block->lir()->instructions_list();

  assert(instructions->length() >= 2, "block must have label and branch");
  assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
  assert(instructions->last()->as_OpBranch() != NULL, "last instruction must always be a branch");
  assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "branch must be unconditional");
  assert(instructions->last()->as_OpBranch()->block() == block->sux_at(0), "branch target must be the successor");

  // block must have exactly one successor

  if (instructions->length() == 2 && instructions->last()->info() == NULL) {
    return true;
  }
  return false;
}

// substitute branch targets in all branch-instructions of this block
void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegin* target_from, BlockBegin* target_to) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting empty block: substituting from B%d to B%d inside B%d", target_from->block_id(), target_to->block_id(), block->block_id()));

  LIR_OpList* instructions = block->lir()->instructions_list();

  assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
  for (int i = instructions->length() - 1; i >= 1; i--) {
    LIR_Op* op = instructions->at(i);

    if (op->code() == lir_branch || op->code() == lir_cond_float_branch) {
      assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
      LIR_OpBranch* branch = (LIR_OpBranch*)op;

      if (branch->block() == target_from) {
        branch->change_block(target_to);
      }
      if (branch->ublock() == target_from) {
        branch->change_ublock(target_to);
      }
    }
  }
}

void ControlFlowOptimizer::delete_empty_blocks(BlockList* code) {
  int old_pos = 0;
  int new_pos = 0;
  int num_blocks = code->length();

  while (old_pos < num_blocks) {
    BlockBegin* block = code->at(old_pos);

    if (can_delete_block(block)) {
      BlockBegin* new_target = block->sux_at(0);

      // propagate backward branch target flag for correct code alignment
      if (block->is_set(BlockBegin::backward_branch_target_flag)) {
        new_target->set(BlockBegin::backward_branch_target_flag);
      }

      // collect a list with all predecessors that contains each predecessor only once
      // the predecessors of cur are changed during the substitution, so a copy of the
      // predecessor list is necessary
      int j;
      _original_preds.clear();
      for (j = block->number_of_preds() - 1; j >= 0; j--) {
        BlockBegin* pred = block->pred_at(j);
        if (_original_preds.find(pred) == -1) {
          _original_preds.append(pred);
        }
      }

      for (j = _original_preds.length() - 1; j >= 0; j--) {
        BlockBegin* pred = _original_preds.at(j);
        substitute_branch_target(pred, block, new_target);
        pred->substitute_sux(block, new_target);
      }
    } else {
      // adjust position of this block in the block list if blocks before
      // have been deleted
      if (new_pos != old_pos) {
        code->at_put(new_pos, code->at(old_pos));
      }
      new_pos++;
    }
    old_pos++;
  }
  code->trunc_to(new_pos);

  DEBUG_ONLY(verify(code));
}
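
// Illustration (standalone sketch, hypothetical names): the two-cursor
// compaction idiom used by delete_empty_blocks. Survivors are copied down over
// deleted entries in one pass, then the list is truncated to the survivor count.
static int sketch_compact(int* list, int len, bool (*deletable)(int)) {
  int new_pos = 0;
  for (int old_pos = 0; old_pos < len; old_pos++) {
    if (!deletable(list[old_pos])) {
      list[new_pos++] = list[old_pos];  // keep this entry
    }
  }
  return new_pos;  // caller truncates the list to this length
}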

void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
  // skip the last block because there a branch is always necessary
  for (int i = code->length() - 2; i >= 0; i--) {
    BlockBegin* block = code->at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();

    LIR_Op* last_op = instructions->last();
    if (last_op->code() == lir_branch) {
      assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
      LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;

      assert(last_branch->block() != NULL, "last branch must always have a block as target");
      assert(last_branch->label() == last_branch->block()->label(), "must be equal");

      if (last_branch->info() == NULL) {
        if (last_branch->block() == code->at(i + 1)) {

          TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));

          // delete last branch instruction
          instructions->trunc_to(instructions->length() - 1);

        } else {
          LIR_Op* prev_op = instructions->at(instructions->length() - 2);
          if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
            assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
            LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;

            if (prev_branch->stub() == NULL) {

              LIR_Op2* prev_cmp = NULL;
              // There might be a cmove inserted for profiling which depends on the same
              // compare. If we change the condition of the respective compare, we have
              // to take care of this cmove as well.
              LIR_Op2* prev_cmove = NULL;

              for (int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
                prev_op = instructions->at(j);
                // check for the cmove
                if (prev_op->code() == lir_cmove) {
                  assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
                  prev_cmove = (LIR_Op2*)prev_op;
                  assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
                }
                if (prev_op->code() == lir_cmp) {
                  assert(prev_op->as_Op2() != NULL, "cmp must be of type LIR_Op2");
                  prev_cmp = (LIR_Op2*)prev_op;
                  assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
                }
              }
              assert(prev_cmp != NULL, "should have found cmp instruction for branch");
              if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {

                TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));

                // eliminate a conditional branch to the immediate successor
                prev_branch->change_block(last_branch->block());
                prev_branch->negate_cond();
                prev_cmp->set_condition(prev_branch->cond());
                instructions->trunc_to(instructions->length() - 1);
                // if we do change the condition, we have to change the cmove as well
                if (prev_cmove != NULL) {
                  prev_cmove->set_condition(prev_branch->cond());
                  LIR_Opr t = prev_cmove->in_opr1();
                  prev_cmove->set_in_opr1(prev_cmove->in_opr2());
                  prev_cmove->set_in_opr2(t);
                }
              }
            }
          }
        }
      }
    }
  }

  DEBUG_ONLY(verify(code));
}
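
// Illustration (standalone sketch, hypothetical types): the branch folding
// above in its simplest form. A conditional branch to the fall-through block
// followed by an unconditional branch is replaced by the negated conditional
// branch aimed at the unconditional target:
//
//   before:  if (cond) goto NEXT; goto OTHER;   NEXT: ...
//   after:   if (!cond) goto OTHER;             NEXT: ...
struct SketchCondBranch { bool negated; int target; };
static bool sketch_fold_branch_pair(SketchCondBranch* cond_branch, int uncond_target, int fall_through) {
  if (cond_branch->target != fall_through) {
    return false;                                // pattern does not apply
  }
  cond_branch->negated = !cond_branch->negated;  // negate the condition
  cond_branch->target  = uncond_target;          // retarget to the old goto
  return true;  // caller deletes the now-redundant unconditional branch
}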

void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
#ifdef ASSERT
  ResourceBitMap return_converted(BlockBegin::number_of_blocks());
#endif

  for (int i = code->length() - 1; i >= 0; i--) {
    BlockBegin* block = code->at(i);
    LIR_OpList* cur_instructions = block->lir()->instructions_list();
    LIR_Op* cur_last_op = cur_instructions->last();

    assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
    if (cur_instructions->length() == 2 && cur_last_op->code() == lir_return) {
      // the block contains only a label and a return
      // if a predecessor ends with an unconditional jump to this block, then the jump
      // can be replaced with a return instruction
      //
      // Note: the original block with only a return statement cannot be deleted completely
      //       because the predecessors might have other (conditional) jumps to this block
      //       -> this may lead to unnecessary return instructions in the final code

      assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
      assert(block->number_of_sux() == 0 ||
             (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
             "blocks that end with return must not have successors");

      assert(cur_last_op->as_Op1() != NULL, "return must be LIR_Op1");
      LIR_Opr return_opr = ((LIR_Op1*)cur_last_op)->in_opr();

      for (int j = block->number_of_preds() - 1; j >= 0; j--) {
        BlockBegin* pred = block->pred_at(j);
        LIR_OpList* pred_instructions = pred->lir()->instructions_list();
        LIR_Op* pred_last_op = pred_instructions->last();

        if (pred_last_op->code() == lir_branch) {
          assert(pred_last_op->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
          LIR_OpBranch* pred_last_branch = (LIR_OpBranch*)pred_last_op;

          if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
            // replace the jump to a return with a direct return
            // Note: currently the edge between the blocks is not deleted
            pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
#ifdef ASSERT
            return_converted.set_bit(pred->block_id());
#endif
          }
        }
      }
    }
  }
}
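
// Illustration (transformation performed by delete_jumps_to_return, shown
// schematically): a predecessor's unconditional jump to a label+return block
// is rewritten into a direct return; the original block stays because other
// (conditional) branches may still target it.
//
//   before:  goto L;     ...   L: return v;
//   after:   return v;   ...   L: return v;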


#ifdef ASSERT
void ControlFlowOptimizer::verify(BlockList* code) {
  for (int i = 0; i < code->length(); i++) {
    BlockBegin* block = code->at(i);
    LIR_OpList* instructions = block->lir()->instructions_list();

    int j;
    for (j = 0; j < instructions->length(); j++) {
      LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();

      if (op_branch != NULL) {
        assert(op_branch->block() == NULL || code->find(op_branch->block()) != -1, "branch target not valid");
        assert(op_branch->ublock() == NULL || code->find(op_branch->ublock()) != -1, "branch target not valid");
      }
    }

    for (j = 0; j < block->number_of_sux() - 1; j++) {
      BlockBegin* sux = block->sux_at(j);
      assert(code->find(sux) != -1, "successor not valid");
    }

    for (j = 0; j < block->number_of_preds() - 1; j++) {
      BlockBegin* pred = block->pred_at(j);
      assert(code->find(pred) != -1, "predecessor not valid");
    }
  }
}
#endif


#ifndef PRODUCT

// Implementation of LinearScanStatistic

const char* LinearScanStatistic::counter_name(int counter_idx) {
  switch (counter_idx) {
    case counter_method:          return "compiled methods";
    case counter_fpu_method:      return "methods using fpu";
    case counter_loop_method:     return "methods with loops";
    case counter_exception_method:return "methods with xhandler";

    case counter_loop:            return "loops";
    case counter_block:           return "blocks";
    case counter_loop_block:      return "blocks inside loop";
    case counter_exception_block: return "exception handler entries";
    case counter_interval:        return "intervals";
    case counter_fixed_interval:  return "fixed intervals";
    case counter_range:           return "ranges";
    case counter_fixed_range:     return "fixed ranges";
    case counter_use_pos:         return "use positions";
    case counter_fixed_use_pos:   return "fixed use positions";
    case counter_spill_slots:     return "spill slots";

    // counter for classes of lir instructions
    case counter_instruction:     return "total instructions";
    case counter_label:           return "labels";
    case counter_entry:           return "method entries";
    case counter_return:          return "method returns";
    case counter_call:            return "method calls";
    case counter_move:            return "moves";
    case counter_cmp:             return "compare";
    case counter_cond_branch:     return "conditional branches";
    case counter_uncond_branch:   return "unconditional branches";
    case counter_stub_branch:     return "branches to stub";
    case counter_alu:             return "arithmetic + logic";
    case counter_alloc:           return "allocations";
    case counter_sync:            return "synchronisation";
    case counter_throw:           return "throw";
    case counter_unwind:          return "unwind";
    case counter_typecheck:       return "type+null-checks";
    case counter_fpu_stack:       return "fpu-stack";
    case counter_misc_inst:       return "other instructions";
    case counter_other_inst:      return "misc. instructions";

    // counter for different types of moves
    case counter_move_total:      return "total moves";
    case counter_move_reg_reg:    return "register->register";
    case counter_move_reg_stack:  return "register->stack";
    case counter_move_stack_reg:  return "stack->register";
    case counter_move_stack_stack:return "stack->stack";
    case counter_move_reg_mem:    return "register->memory";
    case counter_move_mem_reg:    return "memory->register";
    case counter_move_const_any:  return "constant->any";

    case blank_line_1:            return "";
    case blank_line_2:            return "";

    default: ShouldNotReachHere(); return "";
  }
}

LinearScanStatistic::Counter LinearScanStatistic::base_counter(int counter_idx) {
  if (counter_idx == counter_fpu_method || counter_idx == counter_loop_method || counter_idx == counter_exception_method) {
    return counter_method;
  } else if (counter_idx == counter_loop_block || counter_idx == counter_exception_block) {
    return counter_block;
  } else if (counter_idx >= counter_instruction && counter_idx <= counter_other_inst) {
    return counter_instruction;
  } else if (counter_idx >= counter_move_total && counter_idx <= counter_move_const_any) {
    return counter_move_total;
  }
  return invalid_counter;
}

LinearScanStatistic::LinearScanStatistic() {
  for (int i = 0; i < number_of_counters; i++) {
    _counters_sum[i] = 0;
    _counters_max[i] = -1;
  }
}

// add the method-local numbers to the total sum
void LinearScanStatistic::sum_up(LinearScanStatistic &method_statistic) {
  for (int i = 0; i < number_of_counters; i++) {
    _counters_sum[i] += method_statistic._counters_sum[i];
    _counters_max[i] = MAX2(_counters_max[i], method_statistic._counters_sum[i]);
  }
}

void LinearScanStatistic::print(const char* title) {
  if (CountLinearScan || TraceLinearScanLevel > 0) {
    tty->cr();
    tty->print_cr("***** LinearScan statistic - %s *****", title);

    for (int i = 0; i < number_of_counters; i++) {
      if (_counters_sum[i] > 0 || _counters_max[i] >= 0) {
        tty->print("%25s: %8d", counter_name(i), _counters_sum[i]);

        LinearScanStatistic::Counter cntr = base_counter(i);
        if (cntr != invalid_counter) {
          tty->print("  (%5.1f%%) ", _counters_sum[i] * 100.0 / _counters_sum[cntr]);
        } else {
          tty->print("           ");
        }

        if (_counters_max[i] >= 0) {
          tty->print("%8d", _counters_max[i]);
        }
      }
      tty->cr();
    }
  }
}
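
// Illustration (standalone sketch, hypothetical names): the percentage column
// printed above, with the assumption made explicit that the base counter is
// non-zero whenever a derived counter was incremented.
static double sketch_percent_of_base(int counter_sum, int base_sum) {
  return base_sum != 0 ? counter_sum * 100.0 / base_sum : 0.0;
}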
|
6528 |
||
6529 |
void LinearScanStatistic::collect(LinearScan* allocator) { |
|
6530 |
inc_counter(counter_method); |
|
6531 |
if (allocator->has_fpu_registers()) { |
|
6532 |
inc_counter(counter_fpu_method); |
|
6533 |
} |
|
6534 |
if (allocator->num_loops() > 0) { |
|
6535 |
inc_counter(counter_loop_method); |
|
6536 |
} |
|
6537 |
inc_counter(counter_loop, allocator->num_loops()); |
|
6538 |
inc_counter(counter_spill_slots, allocator->max_spills()); |
|
6539 |
||
6540 |
int i; |
|
6541 |
for (i = 0; i < allocator->interval_count(); i++) { |
|
6542 |
Interval* cur = allocator->interval_at(i); |
|
6543 |
||
6544 |
if (cur != NULL) { |
|
6545 |
inc_counter(counter_interval); |
|
6546 |
inc_counter(counter_use_pos, cur->num_use_positions()); |
|
6547 |
if (LinearScan::is_precolored_interval(cur)) { |
|
6548 |
inc_counter(counter_fixed_interval); |
|
6549 |
inc_counter(counter_fixed_use_pos, cur->num_use_positions()); |
|
6550 |
} |
|
6551 |
||
6552 |
Range* range = cur->first(); |
|
6553 |
while (range != Range::end()) { |
|
6554 |
inc_counter(counter_range); |
|
6555 |
if (LinearScan::is_precolored_interval(cur)) { |
|
6556 |
inc_counter(counter_fixed_range); |
|
6557 |
} |
|
6558 |
range = range->next(); |
|
6559 |
} |
|
6560 |
} |
|
6561 |
} |
|
6562 |
||
6563 |
bool has_xhandlers = false; |
|
6564 |
// Note: only count blocks that are in code-emit order |
|
6565 |
for (i = 0; i < allocator->ir()->code()->length(); i++) { |
|
6566 |
BlockBegin* cur = allocator->ir()->code()->at(i); |
|
6567 |
||
6568 |
inc_counter(counter_block); |
|
6569 |
if (cur->loop_depth() > 0) { |
|
6570 |
inc_counter(counter_loop_block); |
|
6571 |
} |
|
6572 |
if (cur->is_set(BlockBegin::exception_entry_flag)) { |
|
6573 |
inc_counter(counter_exception_block); |
|
6574 |
has_xhandlers = true; |
|
6575 |
} |
|
6576 |
||
6577 |
LIR_OpList* instructions = cur->lir()->instructions_list(); |
|
6578 |
for (int j = 0; j < instructions->length(); j++) { |
|
6579 |
LIR_Op* op = instructions->at(j); |
|
6580 |
||
6581 |
inc_counter(counter_instruction); |
|
6582 |
||
6583 |
switch (op->code()) { |
|
6584 |
case lir_label: inc_counter(counter_label); break; |
|
6585 |
case lir_std_entry: |
|
6586 |
case lir_osr_entry: inc_counter(counter_entry); break; |
|
6587 |
case lir_return: inc_counter(counter_return); break; |
|
6588 |
||
6589 |
case lir_rtcall: |
|
6590 |
case lir_static_call: |
|
6591 |
case lir_optvirtual_call: |
|
6592 |
case lir_virtual_call: inc_counter(counter_call); break; |
|
6593 |
||
6594 |
case lir_move: { |
|
6595 |
inc_counter(counter_move); |
|
6596 |
inc_counter(counter_move_total); |
|
6597 |
||
6598 |
LIR_Opr in = op->as_Op1()->in_opr(); |
|
6599 |
LIR_Opr res = op->as_Op1()->result_opr(); |
|
6600 |
if (in->is_register()) { |
|
6601 |
if (res->is_register()) { |
|
6602 |
inc_counter(counter_move_reg_reg); |
|
6603 |
} else if (res->is_stack()) { |
|
6604 |
inc_counter(counter_move_reg_stack); |
|
6605 |
} else if (res->is_address()) { |
|
6606 |
inc_counter(counter_move_reg_mem); |
|
6607 |
} else { |
|
6608 |
ShouldNotReachHere(); |
|
6609 |
} |
|
6610 |
} else if (in->is_stack()) { |
|
6611 |
if (res->is_register()) { |
|
6612 |
inc_counter(counter_move_stack_reg); |
|
6613 |
} else { |
|
6614 |
inc_counter(counter_move_stack_stack); |
|
6615 |
} |
|
6616 |
} else if (in->is_address()) { |
|
6617 |
assert(res->is_register(), "must be"); |
|
6618 |
inc_counter(counter_move_mem_reg); |
|
6619 |
} else if (in->is_constant()) { |
|
6620 |
inc_counter(counter_move_const_any); |
|
6621 |
} else { |
|
6622 |
ShouldNotReachHere(); |
|
6623 |
} |
|
6624 |
break; |
|
6625 |
} |
|
6626 |
||
6627 |
case lir_cmp: inc_counter(counter_cmp); break; |
|
6628 |
||
6629 |
case lir_branch: |
|
6630 |
case lir_cond_float_branch: { |
|
6631 |
LIR_OpBranch* branch = op->as_OpBranch(); |
|
6632 |
if (branch->block() == NULL) { |
|
6633 |
inc_counter(counter_stub_branch); |
|
6634 |
} else if (branch->cond() == lir_cond_always) { |
|
6635 |
inc_counter(counter_uncond_branch); |
|
6636 |
} else { |
|
6637 |
inc_counter(counter_cond_branch); |
|
6638 |
} |
|
6639 |
break; |
|
6640 |
} |
|
6641 |
||
        case lir_neg:
        case lir_add:
        case lir_sub:
        case lir_mul:
        case lir_mul_strictfp:
        case lir_div:
        case lir_div_strictfp:
        case lir_rem:
        case lir_sqrt:
        case lir_abs:
        case lir_log10:
        case lir_logic_and:
        case lir_logic_or:
        case lir_logic_xor:
        case lir_shl:
        case lir_shr:
        case lir_ushr:            inc_counter(counter_alu); break;

        case lir_alloc_object:
        case lir_alloc_array:     inc_counter(counter_alloc); break;

        case lir_monaddr:
        case lir_lock:
        case lir_unlock:          inc_counter(counter_sync); break;

        case lir_throw:           inc_counter(counter_throw); break;

        case lir_unwind:          inc_counter(counter_unwind); break;

        case lir_null_check:
        case lir_leal:
        case lir_instanceof:
        case lir_checkcast:
        case lir_store_check:     inc_counter(counter_typecheck); break;

        case lir_fpop_raw:
        case lir_fxch:
        case lir_fld:             inc_counter(counter_fpu_stack); break;

        case lir_nop:
        case lir_push:
        case lir_pop:
        case lir_convert:
        case lir_roundfp:
        case lir_cmove:           inc_counter(counter_misc_inst); break;

        default:                  inc_counter(counter_other_inst); break;
      }
    }
  }

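  // a method counts as an exception method if at least one of its
  // blocks is an exception handler entry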
  if (has_xhandlers) {
    inc_counter(counter_exception_method);
  }
}

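// collect the statistics of the current method and accumulate them into the
// global statistic; the local numbers are printed only at TraceLinearScanLevel > 2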
void LinearScanStatistic::compute(LinearScan* allocator, LinearScanStatistic &global_statistic) {
  if (CountLinearScan || TraceLinearScanLevel > 0) {

    LinearScanStatistic local_statistic = LinearScanStatistic();

    local_statistic.collect(allocator);
    global_statistic.sum_up(local_statistic);

    if (TraceLinearScanLevel > 2) {
      local_statistic.print("current local statistic");
    }
  }
}


// Implementation of LinearScanTimers

LinearScanTimers::LinearScanTimers() {
  for (int i = 0; i < number_of_timers; i++) {
    timer(i)->reset();
  }
}

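// human-readable names for the phase timers, used by print() below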
const char* LinearScanTimers::timer_name(int idx) {
  switch (idx) {
    case timer_do_nothing:               return "Nothing (Time Check)";
    case timer_number_instructions:      return "Number Instructions";
    case timer_compute_local_live_sets:  return "Local Live Sets";
    case timer_compute_global_live_sets: return "Global Live Sets";
    case timer_build_intervals:          return "Build Intervals";
    case timer_sort_intervals_before:    return "Sort Intervals Before";
    case timer_allocate_registers:       return "Allocate Registers";
    case timer_resolve_data_flow:        return "Resolve Data Flow";
    case timer_sort_intervals_after:     return "Sort Intervals After";
    case timer_eliminate_spill_moves:    return "Spill optimization";
    case timer_assign_reg_num:           return "Assign Reg Num";
    case timer_allocate_fpu_stack:       return "Allocate FPU Stack";
    case timer_optimize_lir:             return "Optimize LIR";
    default: ShouldNotReachHere();       return "";
  }
}

void LinearScanTimers::begin_method() {
  if (TimeEachLinearScan) {
    // reset all timers to measure only the current method
    for (int i = 0; i < number_of_timers; i++) {
      timer(i)->reset();
    }
  }
}

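// print all timing information of one method in a single '@'-separated line;
// the overhead of the do-nothing timer is subtracted from each phase time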
void LinearScanTimers::end_method(LinearScan* allocator) {
  if (TimeEachLinearScan) {

    double c = timer(timer_do_nothing)->seconds();
    double total = 0;
    for (int i = 1; i < number_of_timers; i++) {
      total += timer(i)->seconds() - c;
    }

    if (total >= 0.0005) {
      // print all information in one line for automatic processing
      tty->print("@"); allocator->compilation()->method()->print_name();

      tty->print("@ %d ", allocator->compilation()->method()->code_size());
      tty->print("@ %d ", allocator->block_at(allocator->block_count() - 1)->last_lir_instruction_id() / 2);
      tty->print("@ %d ", allocator->block_count());
      tty->print("@ %d ", allocator->num_virtual_regs());
      tty->print("@ %d ", allocator->interval_count());
      tty->print("@ %d ", allocator->_num_calls);
      tty->print("@ %d ", allocator->num_loops());

      tty->print("@ %6.6f ", total);
      for (int i = 1; i < number_of_timers; i++) {
        tty->print("@ %4.1f ", ((timer(i)->seconds() - c) / total) * 100);
      }
      tty->cr();
    }
  }
}

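// print the accumulated phase times, both raw and corrected for the
// measurement overhead of the do-nothing timer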
void LinearScanTimers::print(double total_time) {
  if (TimeLinearScan) {
    // correction value: the dummy timer measures only the time that is
    // necessary to start and stop itself
    double c = timer(timer_do_nothing)->seconds();

    for (int i = 0; i < number_of_timers; i++) {
      double t = timer(i)->seconds();
      tty->print_cr(" %25s: %6.3f s (%4.1f%%) corrected: %6.3f s (%4.1f%%)", timer_name(i), t, (t / total_time) * 100.0, t - c, (t - c) / (total_time - 2 * number_of_timers * c) * 100);
    }
  }
}

#endif // #ifndef PRODUCT