/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_IR.cpp.incl"


// Implementation of XHandlers
//
// Note: This code could eventually go away if we are
// just using the ciExceptionHandlerStream.

XHandlers::XHandlers(ciMethod* method) : _list(method->exception_table_length()) {
  ciExceptionHandlerStream s(method);
  while (!s.is_done()) {
    _list.append(new XHandler(s.handler()));
    s.next();
  }
  assert(s.count() == method->exception_table_length(), "exception table lengths inconsistent");
}

// deep copy of all XHandler contained in list
XHandlers::XHandlers(XHandlers* other) :
  _list(other->length())
{
  for (int i = 0; i < other->length(); i++) {
    _list.append(new XHandler(other->handler_at(i)));
  }
}

// Returns whether a particular exception type can be caught. Also
// returns true if klass is unloaded or any exception handler
// classes are unloaded. type_is_exact indicates whether the throw
// is known to be exactly that class or it might throw a subtype.
bool XHandlers::could_catch(ciInstanceKlass* klass, bool type_is_exact) const {
  // the type is unknown so be conservative
  if (!klass->is_loaded()) {
    return true;
  }

  for (int i = 0; i < length(); i++) {
    XHandler* handler = handler_at(i);
    if (handler->is_catch_all()) {
      // catch of ANY
      return true;
    }
    ciInstanceKlass* handler_klass = handler->catch_klass();
    // if it's unknown it might be catchable
    if (!handler_klass->is_loaded()) {
      return true;
    }
    // if the throw type is definitely a subtype of the catch type
    // then it can be caught.
    if (klass->is_subtype_of(handler_klass)) {
      return true;
    }
    if (!type_is_exact) {
      // If the type isn't exactly known then it can also be caught by
      // catch statements where the inexact type is a subtype of the
      // catch type.
      // given: foo extends bar extends Exception
      // throw bar can be caught by catch foo, catch bar, and catch
      // Exception, however it can't be caught by any handlers without
      // bar in its type hierarchy.
      if (handler_klass->is_subtype_of(klass)) {
        return true;
      }
    }
  }

  return false;
}

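// Structural equality of two exception handler lists: same length and
// pairwise equal handlers.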
bool XHandlers::equals(XHandlers* others) const {
  if (others == NULL) return false;
  if (length() != others->length()) return false;

  for (int i = 0; i < length(); i++) {
    if (!handler_at(i)->equals(others->handler_at(i))) return false;
  }
  return true;
}

bool XHandler::equals(XHandler* other) const {
  assert(entry_pco() != -1 && other->entry_pco() != -1, "must have entry_pco");

  if (entry_pco() != other->entry_pco()) return false;
  if (scope_count() != other->scope_count()) return false;
  if (_desc != other->_desc) return false;

  assert(entry_block() == other->entry_block(), "entry_block must be equal when entry_pco is equal");
  return true;
}


// Implementation of IRScope
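// Build the HIR for this scope with the GraphBuilder; returns the start
// block, or NULL if the compilation bailed out.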
BlockBegin* IRScope::build_graph(Compilation* compilation, int osr_bci) {
  GraphBuilder gm(compilation, this);
  NOT_PRODUCT(if (PrintValueNumbering && Verbose) gm.print_stats());
  if (compilation->bailed_out()) return NULL;
  return gm.start();
}


IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMethod* method, int osr_bci, bool create_graph)
: _callees(2)
, _compilation(compilation)
, _requires_phi_function(method->max_locals())
{
  _caller = caller;
  _level = caller == NULL ? 0 : caller->level() + 1;
  _method = method;
  _xhandlers = new XHandlers(method);
  _number_of_locks = 0;
  _monitor_pairing_ok = method->has_balanced_monitors();
  _start = NULL;

  if (osr_bci == -1) {
    _requires_phi_function.clear();
  } else {
    // selective creation of phi functions is not possible in osr-methods
    _requires_phi_function.set_range(0, method->max_locals());
  }

  assert(method->holder()->is_loaded(), "method holder must be loaded");

  // build graph if monitor pairing is ok
  if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
}

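// Conservative estimate of the maximum expression stack size of this scope:
// the scope's own max_stack plus the largest max_stack of any inlined callee.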
int IRScope::max_stack() const {
  int my_max = method()->max_stack();
  int callee_max = 0;
  for (int i = 0; i < number_of_callees(); i++) {
    callee_max = MAX2(callee_max, callee_no(i)->max_stack());
  }
  return my_max + callee_max;
}

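// Decide whether the bytecode at this bci must be re-executed (rather than
// resumed after it) when the execution state is reconstructed, e.g. during
// deoptimization; delegates to Interpreter::bytecode_should_reexecute,
// except for the synthetic SynchronizationEntryBCI.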
bool IRScopeDebugInfo::should_reexecute() {
  ciMethod* cur_method = scope()->method();
  int cur_bci = bci();
  if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code);
  } else
    return false;
}


// Implementation of CodeEmitInfo

// Stack must be NON-null
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
  : _scope(stack->scope())
  , _scope_debug_info(NULL)
  , _oop_map(NULL)
  , _stack(stack)
  , _exception_handlers(exception_handlers)
  , _is_method_handle_invoke(false) {
  assert(_stack != NULL, "must be non null");
}


CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
  : _scope(info->_scope)
  , _exception_handlers(NULL)
  , _scope_debug_info(NULL)
  , _oop_map(NULL)
  , _stack(stack == NULL ? info->_stack : stack)
  , _is_method_handle_invoke(info->_is_method_handle_invoke) {

  // deep copy of exception handlers
  if (info->_exception_handlers != NULL) {
    _exception_handlers = new XHandlers(info->_exception_handlers);
  }
}

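// Record a safepoint at pc_offset and the debug information for this
// emission point (including the enclosing scopes) in the recorder.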
void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
  // record the safepoint before recording the debug info for enclosing scopes
  recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
  recorder->end_safepoint(pc_offset);
}

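// Record in the oop map that the register assigned to opr holds an oop.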
void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
  assert(_oop_map != NULL, "oop map must already exist");
  assert(opr->is_single_cpu(), "should not call otherwise");

  VMReg name = frame_map()->regname(opr);
  _oop_map->set_oop(name);
}



// Implementation of IR

IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
    _locals_size(in_WordSize(-1))
  , _num_loops(0) {
  // setup IR fields
  _compilation = compilation;
  _top_scope = new IRScope(compilation, NULL, -1, method, osr_bci, true);
  _code = NULL;
}


void IR::optimize() {
  Optimizer opt(this);
  if (!compilation()->profile_branches()) {
    if (DoCEE) {
      opt.eliminate_conditional_expressions();
#ifndef PRODUCT
      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
      if (PrintIR || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
#endif
    }
    if (EliminateBlocks) {
      opt.eliminate_blocks();
#ifndef PRODUCT
      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
      if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
#endif
    }
  }
  if (EliminateNullChecks) {
    opt.eliminate_null_checks();
#ifndef PRODUCT
    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after null check elimination"); print(true); }
    if (PrintIR || PrintIR1 ) { tty->print_cr("IR after null check elimination"); print(false); }
#endif
  }
}

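// Comparison function used to sort the collected critical-edge pairs by
// source block id and then by destination block id, so duplicate pairs end
// up adjacent.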
static int sort_pairs(BlockPair** a, BlockPair** b) {
  if ((*a)->from() == (*b)->from()) {
    return (*a)->to()->block_id() - (*b)->to()->block_id();
  } else {
    return (*a)->from()->block_id() - (*b)->from()->block_id();
  }
}

class CriticalEdgeFinder: public BlockClosure { |
|
273 |
BlockPairList blocks; |
|
274 |
IR* _ir; |
|
275 |
||
276 |
public: |
|
277 |
CriticalEdgeFinder(IR* ir): _ir(ir) {} |
|
278 |
void block_do(BlockBegin* bb) { |
|
279 |
BlockEnd* be = bb->end(); |
|
280 |
int nos = be->number_of_sux(); |
|
281 |
if (nos >= 2) { |
|
282 |
for (int i = 0; i < nos; i++) { |
|
283 |
BlockBegin* sux = be->sux_at(i); |
|
284 |
if (sux->number_of_preds() >= 2) { |
|
285 |
blocks.append(new BlockPair(bb, sux)); |
|
286 |
} |
|
287 |
} |
|
288 |
} |
|
289 |
} |
|
290 |
||
291 |
void split_edges() { |
|
292 |
BlockPair* last_pair = NULL; |
|
293 |
blocks.sort(sort_pairs); |
|
294 |
for (int i = 0; i < blocks.length(); i++) { |
|
295 |
BlockPair* pair = blocks.at(i); |
|
296 |
if (last_pair != NULL && pair->is_same(last_pair)) continue; |
|
297 |
BlockBegin* from = pair->from(); |
|
298 |
BlockBegin* to = pair->to(); |
|
299 |
BlockBegin* split = from->insert_block_between(to); |
|
300 |
#ifndef PRODUCT |
|
301 |
if ((PrintIR || PrintIR1) && Verbose) { |
|
302 |
tty->print_cr("Split critical edge B%d -> B%d (new block B%d)", |
|
303 |
from->block_id(), to->block_id(), split->block_id()); |
|
304 |
} |
|
305 |
#endif |
|
306 |
last_pair = pair; |
|
307 |
} |
|
308 |
} |
|
309 |
}; |
|
310 |
||
311 |
void IR::split_critical_edges() { |
|
312 |
CriticalEdgeFinder cef(this); |
|
313 |
||
314 |
iterate_preorder(&cef); |
|
315 |
cef.split_edges(); |
|
316 |
} |
|
317 |
||
318 |
||
5707 | 319 |
class UseCountComputer: public ValueVisitor, BlockClosure { |
1 | 320 |
private: |
5707 | 321 |
void visit(Value* n) { |
1 | 322 |
// Local instructions and Phis for expression stack values at the |
323 |
// start of basic blocks are not added to the instruction list |
|
7100 | 324 |
if (!(*n)->is_linked() && (*n)->can_be_linked()) { |
1 | 325 |
assert(false, "a node was not appended to the graph"); |
5707 | 326 |
Compilation::current()->bailout("a node was not appended to the graph"); |
1 | 327 |
} |
328 |
// use n's input if not visited before |
|
329 |
if (!(*n)->is_pinned() && !(*n)->has_uses()) { |
|
330 |
// note: a) if the instruction is pinned, it will be handled by compute_use_count |
|
331 |
// b) if the instruction has uses, it was touched before |
|
332 |
// => in both cases we don't need to update n's values |
|
333 |
uses_do(n); |
|
334 |
} |
|
335 |
// use n |
|
336 |
(*n)->_use_count++; |
|
337 |
} |
|
338 |
||
5707 | 339 |
Values* worklist; |
340 |
int depth; |
|
1 | 341 |
enum { |
342 |
max_recurse_depth = 20 |
|
343 |
}; |
|
344 |
||
5707 | 345 |
void uses_do(Value* n) { |
1 | 346 |
depth++; |
347 |
if (depth > max_recurse_depth) { |
|
348 |
// don't allow the traversal to recurse too deeply |
|
349 |
worklist->push(*n); |
|
350 |
} else { |
|
5707 | 351 |
(*n)->input_values_do(this); |
1 | 352 |
// special handling for some instructions |
353 |
if ((*n)->as_BlockEnd() != NULL) { |
|
354 |
// note on BlockEnd: |
|
355 |
// must 'use' the stack only if the method doesn't |
|
356 |
// terminate, however, in those cases stack is empty |
|
5707 | 357 |
(*n)->state_values_do(this); |
1 | 358 |
} |
359 |
} |
|
360 |
depth--; |
|
361 |
} |
|
362 |
||
5707 | 363 |
void block_do(BlockBegin* b) { |
1 | 364 |
depth = 0; |
365 |
// process all pinned nodes as the roots of expression trees |
|
366 |
for (Instruction* n = b; n != NULL; n = n->next()) { |
|
367 |
if (n->is_pinned()) uses_do(&n); |
|
368 |
} |
|
369 |
assert(depth == 0, "should have counted back down"); |
|
370 |
||
371 |
// now process any unpinned nodes which recursed too deeply |
|
372 |
while (worklist->length() > 0) { |
|
373 |
Value t = worklist->pop(); |
|
374 |
if (!t->is_pinned()) { |
|
375 |
// compute the use count |
|
376 |
uses_do(&t); |
|
377 |
||
378 |
// pin the instruction so that LIRGenerator doesn't recurse |
|
379 |
// too deeply during it's evaluation. |
|
380 |
t->pin(); |
|
381 |
} |
|
382 |
} |
|
383 |
assert(depth == 0, "should have counted back down"); |
|
384 |
} |
|
385 |
||
5707 | 386 |
UseCountComputer() { |
387 |
worklist = new Values(); |
|
388 |
depth = 0; |
|
389 |
} |
|
390 |
||
1 | 391 |
public: |
392 |
static void compute(BlockList* blocks) { |
|
5707 | 393 |
UseCountComputer ucc; |
394 |
blocks->iterate_backward(&ucc); |
|
1 | 395 |
} |
396 |
}; |
|
397 |
||
398 |
||
399 |
// helper macro for short definition of trace-output inside code |
|
400 |
#ifndef PRODUCT |
|
401 |
#define TRACE_LINEAR_SCAN(level, code) \ |
|
402 |
if (TraceLinearScanLevel >= level) { \ |
|
403 |
code; \ |
|
404 |
} |
|
405 |
#else |
|
406 |
#define TRACE_LINEAR_SCAN(level, code) |
|
407 |
#endif |
|
408 |
||
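// Computes a block ordering for the linear scan register allocator: loops
// are detected and numbered, every block gets a loop index and loop depth,
// and the blocks are then ordered by a weight heuristic (see compute_weight)
// that keeps the blocks of a loop contiguous and pushes exception handling
// code towards the end. Block dominators are computed along the way.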
class ComputeLinearScanOrder : public StackObj {
 private:
  int        _max_block_id;        // the highest block_id of a block
  int        _num_blocks;          // total number of blocks (smaller than _max_block_id)
  int        _num_loops;           // total number of loops
  bool       _iterative_dominators;// method requires iterative computation of dominators

  BlockList* _linear_scan_order;   // the resulting list of blocks in correct order

  BitMap     _visited_blocks;      // used for recursive processing of blocks
  BitMap     _active_blocks;       // used for recursive processing of blocks
  BitMap     _dominator_blocks;    // temporary BitMap used for computation of dominator
  intArray   _forward_branches;    // number of incoming forward branches for each block
  BlockList  _loop_end_blocks;     // list of all loop end blocks collected during count_edges
  BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
  BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)

  Compilation* _compilation;

  // accessors for _visited_blocks and _active_blocks
  void init_visited()                     { _active_blocks.clear(); _visited_blocks.clear(); }
  bool is_visited(BlockBegin* b) const    { return _visited_blocks.at(b->block_id()); }
  bool is_active(BlockBegin* b) const     { return _active_blocks.at(b->block_id()); }
  void set_visited(BlockBegin* b)         { assert(!is_visited(b), "already set"); _visited_blocks.set_bit(b->block_id()); }
  void set_active(BlockBegin* b)          { assert(!is_active(b), "already set"); _active_blocks.set_bit(b->block_id()); }
  void clear_active(BlockBegin* b)        { assert(is_active(b), "not already"); _active_blocks.clear_bit(b->block_id()); }

  // accessors for _forward_branches
  void inc_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) + 1); }
  int  dec_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) - 1); return _forward_branches.at(b->block_id()); }

  // accessors for _loop_map
  bool is_block_in_loop   (int loop_idx, BlockBegin* b) const { return _loop_map.at(loop_idx, b->block_id()); }
  void set_block_in_loop  (int loop_idx, BlockBegin* b)       { _loop_map.set_bit(loop_idx, b->block_id()); }
  void clear_block_in_loop(int loop_idx, int block_id)        { _loop_map.clear_bit(loop_idx, block_id); }

  // count edges between blocks
  void count_edges(BlockBegin* cur, BlockBegin* parent);

  // loop detection
  void mark_loops();
  void clear_non_natural_loops(BlockBegin* start_block);
  void assign_loop_depth(BlockBegin* start_block);

  // computation of final block order
  BlockBegin* common_dominator(BlockBegin* a, BlockBegin* b);
  void compute_dominator(BlockBegin* cur, BlockBegin* parent);
  int  compute_weight(BlockBegin* cur);
  bool ready_for_processing(BlockBegin* cur);
  void sort_into_work_list(BlockBegin* b);
  void append_block(BlockBegin* cur);
  void compute_order(BlockBegin* start_block);

  // fixup of dominators for non-natural loops
  bool compute_dominators_iter();
  void compute_dominators();

  // debug functions
  NOT_PRODUCT(void print_blocks();)
  DEBUG_ONLY(void verify();)

  Compilation* compilation() const { return _compilation; }
 public:
  ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);

  // accessors for final result
  BlockList* linear_scan_order() const    { return _linear_scan_order; }
  int        num_loops() const            { return _num_loops; }
};


ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
  _max_block_id(BlockBegin::number_of_blocks()),
  _num_blocks(0),
  _num_loops(0),
  _iterative_dominators(false),
  _visited_blocks(_max_block_id),
  _active_blocks(_max_block_id),
  _dominator_blocks(_max_block_id),
  _forward_branches(_max_block_id, 0),
  _loop_end_blocks(8),
  _work_list(8),
  _linear_scan_order(NULL), // initialized later with correct size
  _loop_map(0, 0),          // initialized later with correct size
  _compilation(c)
{
  TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");

  init_visited();
  count_edges(start_block, NULL);

  if (compilation()->is_profiling()) {
    compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
  }

  if (_num_loops > 0) {
    mark_loops();
    clear_non_natural_loops(start_block);
    assign_loop_depth(start_block);
  }

  compute_order(start_block);
  compute_dominators();

  NOT_PRODUCT(print_blocks());
  DEBUG_ONLY(verify());
}


// Traverse the CFG:
// * count total number of blocks
// * count all incoming edges and backward incoming edges
// * number loop header blocks
// * create a list with all loop end blocks
void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("Enter count_edges for block B%d coming from B%d", cur->block_id(), parent != NULL ? parent->block_id() : -1));
  assert(cur->dominator() == NULL, "dominator already initialized");

  if (is_active(cur)) {
    TRACE_LINEAR_SCAN(3, tty->print_cr("backward branch"));
    assert(is_visited(cur), "block must be visited when block is active");
    assert(parent != NULL, "must have parent");

    cur->set(BlockBegin::linear_scan_loop_header_flag);
    cur->set(BlockBegin::backward_branch_target_flag);

    parent->set(BlockBegin::linear_scan_loop_end_flag);

    // When a loop header is also the start of an exception handler, then the backward branch is
    // an exception edge. Because such edges are usually critical edges which cannot be split, the
    // loop must be excluded here from processing.
    if (cur->is_set(BlockBegin::exception_entry_flag)) {
      // Make sure that dominators are correct in this weird situation
      _iterative_dominators = true;
      return;
    }
    assert(parent->number_of_sux() == 1 && parent->sux_at(0) == cur,
           "loop end blocks must have one successor (critical edges are split)");

    _loop_end_blocks.append(parent);
    return;
  }

  // increment number of incoming forward branches
  inc_forward_branches(cur);

  if (is_visited(cur)) {
    TRACE_LINEAR_SCAN(3, tty->print_cr("block already visited"));
    return;
  }

  _num_blocks++;
  set_visited(cur);
  set_active(cur);

  // recursive call for all successors
  int i;
  for (i = cur->number_of_sux() - 1; i >= 0; i--) {
    count_edges(cur->sux_at(i), cur);
  }
  for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
    count_edges(cur->exception_handler_at(i), cur);
  }

  clear_active(cur);

  // Each loop has a unique number.
  // When multiple loops are nested, assign_loop_depth assumes that the
  // innermost loop has the lowest number. This is guaranteed by setting
  // the loop number after the recursive calls for the successors above
  // have returned.
  if (cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
    assert(cur->loop_index() == -1, "cannot set loop-index twice");
    TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));

    cur->set_loop_index(_num_loops);
    _num_loops++;
  }

  TRACE_LINEAR_SCAN(3, tty->print_cr("Finished count_edges for block B%d", cur->block_id()));
}

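// For each loop-end block collected by count_edges, walk backwards over the
// predecessors until the loop header is reached and mark every block visited
// on the way as belonging to that loop in _loop_map.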
void ComputeLinearScanOrder::mark_loops() {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- marking loops"));

  _loop_map = BitMap2D(_num_loops, _max_block_id);
  _loop_map.clear();

  for (int i = _loop_end_blocks.length() - 1; i >= 0; i--) {
    BlockBegin* loop_end = _loop_end_blocks.at(i);
    BlockBegin* loop_start = loop_end->sux_at(0);
    int loop_idx = loop_start->loop_index();

    TRACE_LINEAR_SCAN(3, tty->print_cr("Processing loop from B%d to B%d (loop %d):", loop_start->block_id(), loop_end->block_id(), loop_idx));
    assert(loop_end->is_set(BlockBegin::linear_scan_loop_end_flag), "loop end flag must be set");
    assert(loop_end->number_of_sux() == 1, "incorrect number of successors");
    assert(loop_start->is_set(BlockBegin::linear_scan_loop_header_flag), "loop header flag must be set");
    assert(loop_idx >= 0 && loop_idx < _num_loops, "loop index not set");
    assert(_work_list.is_empty(), "work list must be empty before processing");

    // add the end-block of the loop to the working list
    _work_list.push(loop_end);
    set_block_in_loop(loop_idx, loop_end);
    do {
      BlockBegin* cur = _work_list.pop();

      TRACE_LINEAR_SCAN(3, tty->print_cr(" processing B%d", cur->block_id()));
      assert(is_block_in_loop(loop_idx, cur), "bit in loop map must be set when block is in work list");

      // recursive processing of all predecessors ends when start block of loop is reached
      if (cur != loop_start && !cur->is_set(BlockBegin::osr_entry_flag)) {
        for (int j = cur->number_of_preds() - 1; j >= 0; j--) {
          BlockBegin* pred = cur->pred_at(j);

          if (!is_block_in_loop(loop_idx, pred) /*&& !pred->is_set(BlockBegin::osr_entry_flag)*/) {
            // this predecessor has not been processed yet, so add it to work list
            TRACE_LINEAR_SCAN(3, tty->print_cr(" pushing B%d", pred->block_id()));
            _work_list.push(pred);
            set_block_in_loop(loop_idx, pred);
          }
        }
      }
    } while (!_work_list.is_empty());
  }
}


// check for non-natural loops (loops where the loop header does not dominate
// all other loop blocks = loops with multiple entries).
// such loops are ignored
void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
  for (int i = _num_loops - 1; i >= 0; i--) {
    if (is_block_in_loop(i, start_block)) {
      // loop i contains the entry block of the method
      // -> this is not a natural loop, so ignore it
      TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));

      for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
        clear_block_in_loop(i, block_id);
      }
      _iterative_dominators = true;
    }
  }
}

void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
  init_visited();

  assert(_work_list.is_empty(), "work list must be empty before processing");
  _work_list.append(start_block);

  do {
    BlockBegin* cur = _work_list.pop();

    if (!is_visited(cur)) {
      set_visited(cur);
      TRACE_LINEAR_SCAN(4, tty->print_cr("Computing loop depth for block B%d", cur->block_id()));

      // compute loop-depth and loop-index for the block
      assert(cur->loop_depth() == 0, "cannot set loop-depth twice");
      int i;
      int loop_depth = 0;
      int min_loop_idx = -1;
      for (i = _num_loops - 1; i >= 0; i--) {
        if (is_block_in_loop(i, cur)) {
          loop_depth++;
          min_loop_idx = i;
        }
      }
      cur->set_loop_depth(loop_depth);
      cur->set_loop_index(min_loop_idx);

      // append all unvisited successors to work list
      for (i = cur->number_of_sux() - 1; i >= 0; i--) {
        _work_list.append(cur->sux_at(i));
      }
      for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
        _work_list.append(cur->exception_handler_at(i));
      }
    }
  } while (!_work_list.is_empty());
}

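// Walk the dominator chain of a and remember all blocks on it, then walk the
// dominator chain of b until a block on a's chain is found; that block is
// the nearest common dominator.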
BlockBegin* ComputeLinearScanOrder::common_dominator(BlockBegin* a, BlockBegin* b) {
  assert(a != NULL && b != NULL, "must have input blocks");

  _dominator_blocks.clear();
  while (a != NULL) {
    _dominator_blocks.set_bit(a->block_id());
    assert(a->dominator() != NULL || a == _linear_scan_order->at(0), "dominator must be initialized");
    a = a->dominator();
  }
  while (b != NULL && !_dominator_blocks.at(b->block_id())) {
    assert(b->dominator() != NULL || b == _linear_scan_order->at(0), "dominator must be initialized");
    b = b->dominator();
  }

  assert(b != NULL, "could not find dominator");
  return b;
}

void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* parent) {
  if (cur->dominator() == NULL) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: initializing dominator of B%d to B%d", cur->block_id(), parent->block_id()));
    cur->set_dominator(parent);

  } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
    assert(cur->number_of_preds() > 1, "");
    cur->set_dominator(common_dominator(cur->dominator(), parent));
  }
}

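// Compute the sort weight of a block for compute_order: the loop depth
// occupies the upper bits, followed by one flag bit per heuristic below
// (first flag = highest priority). sort_into_work_list keeps the work list
// ordered by this weight and compute_order pops from the end of the list,
// so ready blocks with larger weights are emitted first.
// For example (derived from the flags below): a block at loop depth 2 that
// is neither loop header nor loop end, is not a critical-edge split block,
// whose end (and single successor's end, if any) is neither a Throw nor a
// Return, and that is not an exception entry, gets weight
// (2 << 16) | (1 << 15) | (1 << 14) | (1 << 12) | (1 << 11) | (1 << 10) | 1 = 0x2dc01.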
int ComputeLinearScanOrder::compute_weight(BlockBegin* cur) {
  BlockBegin* single_sux = NULL;
  if (cur->number_of_sux() == 1) {
    single_sux = cur->sux_at(0);
  }

  // limit loop-depth to 15 bit (only for security reason, it will never be so big)
  int weight = (cur->loop_depth() & 0x7FFF) << 16;

  // general macro for short definition of weight flags
  // the first instance of INC_WEIGHT_IF has the highest priority
  int cur_bit = 15;
  #define INC_WEIGHT_IF(condition) if ((condition)) { weight |= (1 << cur_bit); } cur_bit--;

  // this is necessary for the (very rare) case that two succeeding blocks have
  // the same loop depth, but a different loop index (can happen for endless loops
  // with exception handlers)
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_header_flag));

  // loop end blocks (blocks that end with a backward branch) are added
  // after all other blocks of the loop.
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_end_flag));

  // critical edge split blocks are preferred because they then have a higher
  // probability of being completely empty
  INC_WEIGHT_IF(cur->is_set(BlockBegin::critical_edge_split_flag));

  // exceptions should not be thrown in normal control flow, so these blocks
  // are added as late as possible
  INC_WEIGHT_IF(cur->end()->as_Throw() == NULL && (single_sux == NULL || single_sux->end()->as_Throw() == NULL));
  INC_WEIGHT_IF(cur->end()->as_Return() == NULL && (single_sux == NULL || single_sux->end()->as_Return() == NULL));

  // exception handlers are added as late as possible
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::exception_entry_flag));

  // guarantee that weight is > 0
  weight |= 1;

  #undef INC_WEIGHT_IF
  assert(cur_bit >= 0, "too many flags");
  assert(weight > 0, "weight cannot become negative");

  return weight;
}

bool ComputeLinearScanOrder::ready_for_processing(BlockBegin* cur) {
  // Discount the edge just traveled.
  // When the number drops to zero, all forward branches were processed
  if (dec_forward_branches(cur) != 0) {
    return false;
  }

  assert(_linear_scan_order->index_of(cur) == -1, "block already processed (block can be ready only once)");
  assert(_work_list.index_of(cur) == -1, "block already in work-list (block can be ready only once)");
  return true;
}

void ComputeLinearScanOrder::sort_into_work_list(BlockBegin* cur) {
  assert(_work_list.index_of(cur) == -1, "block already in work list");

  int cur_weight = compute_weight(cur);

  // the linear_scan_number is used to cache the weight of a block
  cur->set_linear_scan_number(cur_weight);

#ifndef PRODUCT
  if (StressLinearScan) {
    _work_list.insert_before(0, cur);
    return;
  }
#endif

  _work_list.append(NULL); // provide space for new element

  int insert_idx = _work_list.length() - 1;
  while (insert_idx > 0 && _work_list.at(insert_idx - 1)->linear_scan_number() > cur_weight) {
    _work_list.at_put(insert_idx, _work_list.at(insert_idx - 1));
    insert_idx--;
  }
  _work_list.at_put(insert_idx, cur);

  TRACE_LINEAR_SCAN(3, tty->print_cr("Sorted B%d into worklist. new worklist:", cur->block_id()));
  TRACE_LINEAR_SCAN(3, for (int i = 0; i < _work_list.length(); i++) tty->print_cr("%8d B%2d weight:%6x", i, _work_list.at(i)->block_id(), _work_list.at(i)->linear_scan_number()));

#ifdef ASSERT
  for (int i = 0; i < _work_list.length(); i++) {
    assert(_work_list.at(i)->linear_scan_number() > 0, "weight not set");
    assert(i == 0 || _work_list.at(i - 1)->linear_scan_number() <= _work_list.at(i)->linear_scan_number(), "incorrect order in worklist");
  }
#endif
}

void ComputeLinearScanOrder::append_block(BlockBegin* cur) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("appending block B%d (weight 0x%6x) to linear-scan order", cur->block_id(), cur->linear_scan_number()));
  assert(_linear_scan_order->index_of(cur) == -1, "cannot add the same block twice");

  // currently, the linear scan order and code emit order are equal.
  // therefore the linear_scan_number and the weight of a block must also
  // be equal.
  cur->set_linear_scan_number(_linear_scan_order->length());
  _linear_scan_order->append(cur);
}

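// Emit the final block order: starting from the std entry, repeatedly take
// the ready block (all incoming forward branches processed) with the highest
// weight from the work list and append it. The osr entry, if present, is
// kept out of normal processing and appended manually right before its
// successor.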
void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, "----- computing final block order");

  // the start block is always the first block in the linear scan order
  _linear_scan_order = new BlockList(_num_blocks);
  append_block(start_block);

  assert(start_block->end()->as_Base() != NULL, "start block must end with Base-instruction");
  BlockBegin* std_entry = ((Base*)start_block->end())->std_entry();
  BlockBegin* osr_entry = ((Base*)start_block->end())->osr_entry();

  BlockBegin* sux_of_osr_entry = NULL;
  if (osr_entry != NULL) {
    // special handling for osr entry:
    // ignore the edge between the osr entry and its successor for processing
    // the osr entry block is added manually below
    assert(osr_entry->number_of_sux() == 1, "osr entry must have exactly one successor");
    assert(osr_entry->sux_at(0)->number_of_preds() >= 2, "successor of osr entry must have two predecessors (otherwise it is not present in normal control flow)");

    sux_of_osr_entry = osr_entry->sux_at(0);
    dec_forward_branches(sux_of_osr_entry);

    compute_dominator(osr_entry, start_block);
    _iterative_dominators = true;
  }
  compute_dominator(std_entry, start_block);

  // start processing with standard entry block
  assert(_work_list.is_empty(), "list must be empty before processing");

  if (ready_for_processing(std_entry)) {
    sort_into_work_list(std_entry);
  } else {
    assert(false, "the std_entry must be ready for processing (otherwise, the method has no start block)");
  }

  do {
    BlockBegin* cur = _work_list.pop();

    if (cur == sux_of_osr_entry) {
      // the osr entry block is ignored in normal processing, it is never added to the
      // work list. Instead, it is added as late as possible manually here.
      append_block(osr_entry);
      compute_dominator(cur, osr_entry);
    }
    append_block(cur);

    int i;
    int num_sux = cur->number_of_sux();
    // changed loop order to get "intuitive" order of if- and else-blocks
    for (i = 0; i < num_sux; i++) {
      BlockBegin* sux = cur->sux_at(i);
      compute_dominator(sux, cur);
      if (ready_for_processing(sux)) {
        sort_into_work_list(sux);
      }
    }
    num_sux = cur->number_of_exception_handlers();
    for (i = 0; i < num_sux; i++) {
      BlockBegin* sux = cur->exception_handler_at(i);
      compute_dominator(sux, cur);
      if (ready_for_processing(sux)) {
        sort_into_work_list(sux);
      }
    }
  } while (_work_list.length() > 0);
}

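// One pass of the iterative dominator fix-point: recompute each block's
// dominator as the common dominator of all its predecessors. Returns true
// if any dominator changed.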
bool ComputeLinearScanOrder::compute_dominators_iter() {
  bool changed = false;
  int num_blocks = _linear_scan_order->length();

  assert(_linear_scan_order->at(0)->dominator() == NULL, "must not have dominator");
  assert(_linear_scan_order->at(0)->number_of_preds() == 0, "must not have predecessors");
  for (int i = 1; i < num_blocks; i++) {
    BlockBegin* block = _linear_scan_order->at(i);

    BlockBegin* dominator = block->pred_at(0);
    int num_preds = block->number_of_preds();
    for (int i = 1; i < num_preds; i++) {
      dominator = common_dominator(dominator, block->pred_at(i));
    }

    if (dominator != block->dominator()) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: updating dominator of B%d from B%d to B%d", block->block_id(), block->dominator()->block_id(), dominator->block_id()));

      block->set_dominator(dominator);
      changed = true;
    }
  }
  return changed;
}

void ComputeLinearScanOrder::compute_dominators() {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing dominators (iterative computation required: %d)", _iterative_dominators));

  // iterative computation of dominators is only required for methods with non-natural loops
  // and OSR-methods. For all other methods, the dominators computed when generating the
  // linear scan block order are correct.
  if (_iterative_dominators) {
    do {
      TRACE_LINEAR_SCAN(1, tty->print_cr("DOM: next iteration of fix-point calculation"));
    } while (compute_dominators_iter());
  }

  // check that dominators are correct
  assert(!compute_dominators_iter(), "fix point not reached");
}


#ifndef PRODUCT
void ComputeLinearScanOrder::print_blocks() {
  if (TraceLinearScanLevel >= 2) {
    tty->print_cr("----- loop information:");
    for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
      BlockBegin* cur = _linear_scan_order->at(block_idx);

      tty->print("%4d: B%2d: ", cur->linear_scan_number(), cur->block_id());
      for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
        tty->print ("%d ", is_block_in_loop(loop_idx, cur));
      }
      tty->print_cr(" -> loop_index: %2d, loop_depth: %2d", cur->loop_index(), cur->loop_depth());
    }
  }

  if (TraceLinearScanLevel >= 1) {
    tty->print_cr("----- linear-scan block order:");
    for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
      BlockBegin* cur = _linear_scan_order->at(block_idx);
      tty->print("%4d: B%2d loop: %2d depth: %2d", cur->linear_scan_number(), cur->block_id(), cur->loop_index(), cur->loop_depth());

      tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : " ");
      tty->print(cur->is_set(BlockBegin::critical_edge_split_flag)     ? " ce" : " ");
      tty->print(cur->is_set(BlockBegin::linear_scan_loop_header_flag) ? " lh" : " ");
      tty->print(cur->is_set(BlockBegin::linear_scan_loop_end_flag)    ? " le" : " ");

      if (cur->dominator() != NULL) {
        tty->print(" dom: B%d ", cur->dominator()->block_id());
      } else {
        tty->print(" dom: NULL ");
      }

      if (cur->number_of_preds() > 0) {
        tty->print(" preds: ");
        for (int j = 0; j < cur->number_of_preds(); j++) {
          BlockBegin* pred = cur->pred_at(j);
          tty->print("B%d ", pred->block_id());
        }
      }
      if (cur->number_of_sux() > 0) {
        tty->print(" sux: ");
        for (int j = 0; j < cur->number_of_sux(); j++) {
          BlockBegin* sux = cur->sux_at(j);
          tty->print("B%d ", sux->block_id());
        }
      }
      if (cur->number_of_exception_handlers() > 0) {
        tty->print(" ex: ");
        for (int j = 0; j < cur->number_of_exception_handlers(); j++) {
          BlockBegin* ex = cur->exception_handler_at(j);
          tty->print("B%d ", ex->block_id());
        }
      }
      tty->cr();
    }
  }
}
#endif

#ifdef ASSERT
void ComputeLinearScanOrder::verify() {
  assert(_linear_scan_order->length() == _num_blocks, "wrong number of blocks in list");

  if (StressLinearScan) {
    // blocks are scrambled when StressLinearScan is used
    return;
  }

  // check that all successors of a block have a higher linear-scan-number
  // and that all predecessors of a block have a lower linear-scan-number
  // (only backward branches of loops are ignored)
  int i;
  for (i = 0; i < _linear_scan_order->length(); i++) {
    BlockBegin* cur = _linear_scan_order->at(i);

    assert(cur->linear_scan_number() == i, "incorrect linear_scan_number");
    assert(cur->linear_scan_number() >= 0 && cur->linear_scan_number() == _linear_scan_order->index_of(cur), "incorrect linear_scan_number");

    int j;
    for (j = cur->number_of_sux() - 1; j >= 0; j--) {
      BlockBegin* sux = cur->sux_at(j);

      assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
      if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
        assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
      }
      if (cur->loop_depth() == sux->loop_depth()) {
        assert(cur->loop_index() == sux->loop_index() || sux->is_set(BlockBegin::linear_scan_loop_header_flag), "succeeding blocks with same loop depth must have same loop index");
      }
    }

    for (j = cur->number_of_preds() - 1; j >= 0; j--) {
      BlockBegin* pred = cur->pred_at(j);

      assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
      if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
        assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
      }
      if (cur->loop_depth() == pred->loop_depth()) {
        assert(cur->loop_index() == pred->loop_index() || cur->is_set(BlockBegin::linear_scan_loop_header_flag), "succeeding blocks with same loop depth must have same loop index");
      }

      assert(cur->dominator()->linear_scan_number() <= cur->pred_at(j)->linear_scan_number(), "dominator must be before predecessors");
    }

    // check dominator
    if (i == 0) {
      assert(cur->dominator() == NULL, "first block has no dominator");
    } else {
      assert(cur->dominator() != NULL, "all but first block must have dominator");
    }
    assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
  }

  // check that all loops are continuous
  for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
    int block_idx = 0;
    assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "the first block must not be present in any loop");

    // skip blocks before the loop
    while (block_idx < _num_blocks && !is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
      block_idx++;
    }
    // skip blocks of loop
    while (block_idx < _num_blocks && is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
      block_idx++;
    }
    // after the first non-loop block, there must not be another loop-block
    while (block_idx < _num_blocks) {
      assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "loop not continuous in linear-scan order");
      block_idx++;
    }
  }
}
#endif

void IR::compute_code() { |
|
1078 |
assert(is_valid(), "IR must be valid"); |
|
1079 |
||
6453 | 1080 |
ComputeLinearScanOrder compute_order(compilation(), start()); |
1 | 1081 |
_num_loops = compute_order.num_loops(); |
1082 |
_code = compute_order.linear_scan_order(); |
|
1083 |
} |
|
1084 |
||
1085 |
||
1086 |
void IR::compute_use_counts() { |
|
1087 |
// make sure all values coming out of this block get evaluated. |
|
1088 |
int num_blocks = _code->length(); |
|
1089 |
for (int i = 0; i < num_blocks; i++) { |
|
1090 |
_code->at(i)->end()->state()->pin_stack_for_linear_scan(); |
|
1091 |
} |
|
1092 |
||
1093 |
// compute use counts |
|
1094 |
UseCountComputer::compute(_code); |
|
1095 |
} |
|
1096 |
||
1097 |
||
1098 |
void IR::iterate_preorder(BlockClosure* closure) { |
|
1099 |
assert(is_valid(), "IR must be valid"); |
|
1100 |
start()->iterate_preorder(closure); |
|
1101 |
} |
|
1102 |
||
1103 |
||
1104 |
void IR::iterate_postorder(BlockClosure* closure) { |
|
1105 |
assert(is_valid(), "IR must be valid"); |
|
1106 |
start()->iterate_postorder(closure); |
|
1107 |
} |
|
1108 |
||
1109 |
void IR::iterate_linear_scan_order(BlockClosure* closure) { |
|
1110 |
linear_scan_order()->iterate_forward(closure); |
|
1111 |
} |
|
1112 |
||
1113 |
||
1114 |
#ifndef PRODUCT |
|
1115 |
class BlockPrinter: public BlockClosure { |
|
1116 |
private: |
|
1117 |
InstructionPrinter* _ip; |
|
1118 |
bool _cfg_only; |
|
1119 |
bool _live_only; |
|
1120 |
||
1121 |
public: |
|
1122 |
BlockPrinter(InstructionPrinter* ip, bool cfg_only, bool live_only = false) { |
|
1123 |
_ip = ip; |
|
1124 |
_cfg_only = cfg_only; |
|
1125 |
_live_only = live_only; |
|
1126 |
} |
|
1127 |
||
1128 |
virtual void block_do(BlockBegin* block) { |
|
1129 |
if (_cfg_only) { |
|
1130 |
_ip->print_instr(block); tty->cr(); |
|
1131 |
} else { |
|
1132 |
block->print_block(*_ip, _live_only); |
|
1133 |
} |
|
1134 |
} |
|
1135 |
}; |
|
1136 |
||
1137 |
||
1138 |
void IR::print(BlockBegin* start, bool cfg_only, bool live_only) { |
|
1139 |
ttyLocker ttyl; |
|
1140 |
InstructionPrinter ip(!cfg_only); |
|
1141 |
BlockPrinter bp(&ip, cfg_only, live_only); |
|
1142 |
start->iterate_preorder(&bp); |
|
1143 |
tty->cr(); |
|
1144 |
} |
|
1145 |
||
1146 |
void IR::print(bool cfg_only, bool live_only) { |
|
1147 |
if (is_valid()) { |
|
1148 |
print(start(), cfg_only, live_only); |
|
1149 |
} else { |
|
1150 |
tty->print_cr("invalid IR"); |
|
1151 |
} |
|
1152 |
} |
|
1153 |
||
1154 |
||
1155 |
define_array(BlockListArray, BlockList*) |
|
1156 |
define_stack(BlockListList, BlockListArray) |
|
1157 |
||
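// Debug-only check: rebuilds the predecessor lists from the successor and
// exception-handler lists of all reachable blocks and verifies that they
// match the predecessor lists stored in the blocks (and that the reachable
// blocks match IR::code(), if it has already been computed).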
class PredecessorValidator : public BlockClosure {
 private:
  BlockListList* _predecessors;
  BlockList* _blocks;

  static int cmp(BlockBegin** a, BlockBegin** b) {
    return (*a)->block_id() - (*b)->block_id();
  }

 public:
  PredecessorValidator(IR* hir) {
    ResourceMark rm;
    _predecessors = new BlockListList(BlockBegin::number_of_blocks(), NULL);
    _blocks = new BlockList();

    int i;
    hir->start()->iterate_preorder(this);
    if (hir->code() != NULL) {
      assert(hir->code()->length() == _blocks->length(), "must match");
      for (i = 0; i < _blocks->length(); i++) {
        assert(hir->code()->contains(_blocks->at(i)), "should be in both lists");
      }
    }

    for (i = 0; i < _blocks->length(); i++) {
      BlockBegin* block = _blocks->at(i);
      BlockList* preds = _predecessors->at(block->block_id());
      if (preds == NULL) {
        assert(block->number_of_preds() == 0, "should be the same");
        continue;
      }

      // clone the pred list so we can mutate it
      BlockList* pred_copy = new BlockList();
      int j;
      for (j = 0; j < block->number_of_preds(); j++) {
        pred_copy->append(block->pred_at(j));
      }
      // sort them in the same order
      preds->sort(cmp);
      pred_copy->sort(cmp);
      int length = MIN2(preds->length(), block->number_of_preds());
      for (j = 0; j < block->number_of_preds(); j++) {
        assert(preds->at(j) == pred_copy->at(j), "must match");
      }

      assert(preds->length() == block->number_of_preds(), "should be the same");
    }
  }

  virtual void block_do(BlockBegin* block) {
    _blocks->append(block);
    BlockEnd* be = block->end();
    int n = be->number_of_sux();
    int i;
    for (i = 0; i < n; i++) {
      BlockBegin* sux = be->sux_at(i);
      assert(!sux->is_set(BlockBegin::exception_entry_flag), "must not be xhandler");

      BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
      if (preds == NULL) {
        preds = new BlockList();
        _predecessors->at_put(sux->block_id(), preds);
      }
      preds->append(block);
    }

    n = block->number_of_exception_handlers();
    for (i = 0; i < n; i++) {
      BlockBegin* sux = block->exception_handler_at(i);
      assert(sux->is_set(BlockBegin::exception_entry_flag), "must be xhandler");

      BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
      if (preds == NULL) {
        preds = new BlockList();
        _predecessors->at_put(sux->block_id(), preds);
      }
      preds->append(block);
    }
  }
};

void IR::verify() {
#ifdef ASSERT
  PredecessorValidator pv(this);
#endif
}

#endif // PRODUCT

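// Replace the value at *v with its substitution, if one has been recorded.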
void SubstitutionResolver::visit(Value* v) {
  Value v0 = *v;
  if (v0) {
    Value vs = v0->subst();
    if (vs != v0) {
      *v = v0->subst();
    }
  }
}

#ifdef ASSERT
class SubstitutionChecker: public ValueVisitor {
  void visit(Value* v) {
    Value v0 = *v;
    if (v0) {
      Value vs = v0->subst();
      assert(vs == v0, "missed substitution");
    }
  }
};
#endif

void SubstitutionResolver::block_do(BlockBegin* block) { |
|
1272 |
Instruction* last = NULL; |
|
1273 |
for (Instruction* n = block; n != NULL;) { |
|
5707 | 1274 |
n->values_do(this); |
1 | 1275 |
// need to remove this instruction from the instruction stream |
1276 |
if (n->subst() != n) { |
|
1277 |
assert(last != NULL, "must have last"); |
|
6745 | 1278 |
last->set_next(n->next()); |
1 | 1279 |
} else { |
1280 |
last = n; |
|
1281 |
} |
|
1282 |
n = last->next(); |
|
1283 |
} |
|
1284 |
||
1285 |
#ifdef ASSERT |
|
5707 | 1286 |
SubstitutionChecker check_substitute; |
1287 |
if (block->state()) block->state()->values_do(&check_substitute); |
|
1288 |
block->block_values_do(&check_substitute); |
|
1289 |
if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute); |
|
1 | 1290 |
#endif |
1291 |
} |